import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    """Drop top-level keys that have no equivalent in the Transformers model."""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}
def rename_keys(s_dict):
    """Rename every key in the state dict according to WHISPER_MAPPING."""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares its weights with the embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # `root` is given a default here because the call site below only passes the URL (an assumption).
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # checkpoint_path is one of the _MODELS keys: download the raw bytes, then deserialize them
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],  # head count, not hidden size
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ : Tuple = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCamelCase__ : Any = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
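# Example invocation (a sketch; the script file name and output path are illustrative,
# not taken from this file):
#
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny
#
# Passing one of the _MODELS keys (e.g. "tiny") downloads the checkpoint first; passing a
# local .pt path loads it directly, as handled at the top of convert_openai_whisper_to_tfms.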
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            # the fused qkv projection is split into separate query/key/value tensors;
            # the target key names below follow the Transformers Swin2SR layout
            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
lowerCamelCase__ : Any = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
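# Example invocation (a sketch; the script file name and output directory are illustrative):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64
#
# The checkpoint URL must be one of the keys of url_to_name above so the model name can be resolved.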
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Compute the Jaccard similarity of two sets (or lists/tuples):
    the size of the intersection divided by the size of the union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
_lowerCamelCase = {"""a""", """b""", """c""", """d""", """e"""}
_lowerCamelCase = {"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
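# Expected output for the sets above: the intersection {"c", "d", "e"} has 3 elements
# and the union has 8, so the script prints 0.375.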
"""Utilities for dataset and split naming (camel-case/snake-case conversion, shard file names)."""
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
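# A minimal usage sketch (illustrative names and paths, not from the original module):
#
#   camelcase_to_snakecase("SomeDatasetName")    # -> "some_dataset_name"
#   filename_prefix_for_split("squad", "train")  # -> "squad-train"
#   filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100])
#   # -> ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]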
"""Check for automorphic numbers: numbers whose square ends in the number itself."""


def is_automorphic_number(number: int) -> bool:
    """Return True if `number` is automorphic, i.e. number**2 ends in `number`."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        # compare the trailing digits of the number and its square, one digit at a time
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
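# Worked examples: 76**2 = 5776 ends in 76, so is_automorphic_number(76) is True;
# 7**2 = 49 does not end in 7, so is_automorphic_number(7) is False.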
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    """Configuration for an ImageGPT model, mirroring the GPT-2 style hyperparameters."""

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sequence token
        n_positions=32 * 32,  # one position per pixel of the 32 x 32 grid
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
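# A minimal usage sketch (assumes the class names restored above):
#
#   config = ImageGPTConfig()  # defaults: vocab_size=513, n_positions=1024, n_layer=24
#   config.hidden_size         # -> 512, resolved through attribute_map to n_embd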
"""simple docstring"""
from collections.abc import Callable
def _snake_case ( UpperCAmelCase_ : Callable[[float], float] , UpperCAmelCase_ : float , UpperCAmelCase_ : float ):
A__ = a
A__ = b
if function(UpperCAmelCase_ ) == 0: # one of the a or b is a root for the function
return a
elif function(UpperCAmelCase_ ) == 0:
return b
elif (
function(UpperCAmelCase_ ) * function(UpperCAmelCase_ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("""could not find root in given interval.""" )
else:
A__ = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(UpperCAmelCase_ ) == 0:
return mid
elif function(UpperCAmelCase_ ) * function(UpperCAmelCase_ ) < 0:
A__ = mid
else:
A__ = mid
A__ = start + (end - start) / 2.0
return mid
def _snake_case ( UpperCAmelCase_ : float ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
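# The demo above searches [1, 1000]; x**3 - 2*x - 5 has a single real root at
# approximately 2.0945515, which the loop reaches to within its 1e-7 tolerance.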
"""Tests for the ASTFeatureExtractor."""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
def interpolation_search(sorted_collection, item):
    """Search for `item` in an ascending `sorted_collection` using interpolation search."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant; `left` and `right` bound the current search range."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )
def __assert_sorted(collection):
    """Raise ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
a : Optional[Any] = 0
if debug == 1:
a : int = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('Sequence must be ascending sorted to apply interpolation search')
a : int = 67
a : Any = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print('Not found')
def find_min(arr):
    """Partition `arr` into two subsets whose sums are as close as possible and return the difference."""
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True when some subset of the first i elements sums to exactly j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # sum j is achievable without element i

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
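# A minimal usage sketch: for [1, 6, 11, 5] the total is 23 and the best split is
# {1, 6, 5} vs {11}, i.e. sums 12 and 11, so the minimum difference is 1.
#
#   print(find_min([1, 6, 11, 5]))  # 1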
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    """
    Arguments for the plotting script, parsed with HfArgumentParser.
    """

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
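# The csv file consumed by Plot is expected to carry the columns read above, e.g.
# (illustrative values, not from the original script):
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,1419
#
# Example invocation (the file names are hypothetical):
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png --is_time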
"""Select the kth smallest element of a list in expected linear time (quickselect)."""
from __future__ import annotations

from random import choice


def random_pivot(lst):
    """Choose a random pivot from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest element (1-indexed) of `lst`."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
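# A minimal usage sketch (distinct elements, since the partition drops values equal to the pivot):
#
#   kth_number([2, 1, 3], 1)  # -> 1 (smallest element)
#   kth_number([2, 1, 3], 3)  # -> 3 (largest element)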
"""Character Error Rate (CER) metric, implemented with jiwer."""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class __A ( tr.AbstractTransform ):
def __init__(self : Optional[int] , __a : str = " " ):
UpperCAmelCase_ = sentence_delimiter
def _lowercase (self : int , __a : str ):
return list(__a )
def _lowercase (self : List[Any] , __a : List[str] ):
UpperCAmelCase_ = []
for sent_idx, sentence in enumerate(__a ):
chars.extend(self.process_string(__a ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__a ) - 1:
chars.append(self.sentence_delimiter )
return chars
SCREAMING_SNAKE_CASE_: Dict =tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_: List[str] =tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = '\\n@inproceedings{inproceedings,\n    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n    year = {2004},\n    month = {01},\n    pages = {},\n    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcriptions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            # jiwer's word error rate computed over character lists is exactly the CER
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
"""Tests for the Perceiver tokenizer."""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
                toks.append((i, tok))
            except UnicodeDecodeError:
                pass

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
def _lowercase (self : int ):
UpperCAmelCase_ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def _lowercase (self : Optional[int] ):
pass
def _lowercase (self : List[str] ):
pass
def _lowercase (self : Tuple ):
pass
def _lowercase (self : List[Any] ):
pass
def _lowercase (self : int ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
UpperCAmelCase_ = self.get_tokenizers(fast=__a , do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase_ = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(__a )
self.assertIsInstance(__a , __a )
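# Illustrative note (not part of the original test file): Perceiver tokenizes raw
# UTF-8 bytes, so decoding a lone id that maps to a continuation byte cannot form a
# valid character and yields U+FFFD. A minimal sketch of the same effect with only
# the standard library:
#
#     bytes([178]).decode("utf-8", errors="replace")  # -> "�"
#
# Byte 0xB2 (178) is a UTF-8 continuation byte, invalid on its own, so the decoder
# substitutes the replacement character, matching tokenizer.decode([178]) above.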
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
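# Illustrative note (not part of the original file): a minimal sketch of what the
# _LazyModule indirection buys. Importing the package stays cheap because heavy
# submodules (e.g. the torch-backed modeling code) are only loaded on first
# attribute access. Paths below assume this file lives at transformers.models.bloom:
#
#     from transformers.models.bloom import BloomConfig   # no torch import yet
#     config = BloomConfig(n_layer=2, hidden_size=64)     # hypothetical tiny config
#     from transformers.models.bloom import BloomModel    # torch is imported here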
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class UniSpeechSatConfig(PretrainedConfig):
    """Configuration class to store the configuration of a UniSpeechSat model."""

    model_type = "unispeech-sat"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
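# Illustrative note (not part of the original file): inputs_to_logits_ratio is the
# total stride of the convolutional feature extractor, i.e. how many raw audio
# samples collapse into one encoder frame. A minimal sketch with the defaults above:
#
#     config = UniSpeechSatConfig()
#     config.inputs_to_logits_ratio  # 5 * 2 * 2 * 2 * 2 * 2 * 2 == 320
#
# At a 16 kHz sampling rate, one logit frame therefore covers 320 samples (20 ms).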
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)
        controlnet = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device))
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
            randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_2 - output_3)) > 1e-3
        assert np.sum(np.abs(output_3 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image("https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))

        output = pipe(prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6)

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")

        assert np.abs(expected_image - image).max() < 9e-2
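# Illustrative note (not part of the original test file): the control guidance
# window arguments exercised in test_control_guidance_switch above restrict the
# ControlNet conditioning to a fraction of the denoising schedule. A minimal usage
# sketch, assuming the same checkpoints as the slow test:
#
#     pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
#     )
#     # apply ControlNet only during the first half of the denoising steps
#     result = pipe(prompt, image, control_image=control_image,
#                   control_guidance_start=0.0, control_guidance_end=0.5)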
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Check if a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composite numbers which do not satisfy
    Goldbach's other conjecture (prime + twice a square)."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
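# Illustrative note (not part of the original file): a quick demonstration of the
# helpers above. 5777 is the smallest odd composite that cannot be written as a
# prime plus twice a square, so it is both the answer and the first element here:
#
#     compute_nums(2)  # [5777, 5993]
#     solution()       # 5777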
import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels)

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for object-detection head model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long)
                    target["boxes"] = torch.ones(self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float)
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
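# Illustrative note (not part of the original test file): a sketch of iterating
# over the post-processed detections checked above. The COCO label ids 75, 17 and
# 63 map to human-readable names through the model config:
#
#     for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
#         print(f"{model.config.id2label[label.item()]}: {score:.3f} at {box.tolist()}")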
"""simple docstring"""
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
__A = []
for line in lines:
__A = re.sub(r'''#.*''' , '''''' , __UpperCamelCase ) # remove comments
if line:
filtered_lines.append(__UpperCamelCase )
__A = '''\n'''.join(__UpperCamelCase )
# Make a hash from all this code
__A = full_str.encode('''utf-8''' )
return shaaaa(__UpperCamelCase ).hexdigest()
# get importable module names and hash for caching
lowercase_ = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowercase_ = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowercase_ = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowercase_ = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
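# Illustrative note (not part of the original file): a minimal sketch of how the
# tables above are meant to be used when inferring a loader from a file name:
#
#     import os
#     module_name, builder_kwargs = _EXTENSION_TO_MODULE[os.path.splitext("data.tsv")[1]]
#     # module_name == "csv", builder_kwargs == {"sep": "\t"}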
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices)

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        self.assertListEqual(list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim])

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(_lowerCamelCase )
__A = self.default_image_processor
__A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__A = image_processor(images=_lowerCamelCase, return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__A = model(**_lowerCamelCase )
# verify the logits
__A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape, _lowerCamelCase )
__A = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], _lowerCamelCase, atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item(), 2_81 )
@require_torch
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = (FocalNetBackbone,) if is_torch_available() else ()
A_ : str = FocalNetConfig
A_ : str = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
        self.model_tester = FocalNetModelTester(self )
| 215
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification( TaskTemplate ):
"""simple docstring"""
_UpperCamelCase : str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
_UpperCamelCase : ClassVar[Features] = Features({"image": Image()} )
_UpperCamelCase : ClassVar[Features] = Features({"labels": ClassLabel} )
_UpperCamelCase : str = "image"
_UpperCamelCase : str = "labels"
    def align_with_features( self , features ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
return task_template
@property
    def column_mapping( self ):
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
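# Minimal usage sketch (names as restored above; `align_with_features` copies the
# template and pins the label schema to the dataset's actual ClassLabel feature):
if __name__ == "__main__":
    features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])} )
    task = ImageClassification().align_with_features(features )
    print(task.column_mapping )  # {'image': 'image', 'labels': 'labels'}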
| 304
|
def valid_coloring( neighbours , colored_vertices , color ):
    # A color is valid for this vertex if no already-colored neighbour uses it
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color( graph , max_colors , colored_vertices , index ):
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph , max_colors ):
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
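# Minimal usage sketch for the backtracking colorer above (names as restored here):
# a triangle graph as an adjacency matrix, colored with at most 3 colors.
if __name__ == "__main__":
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    print(color(triangle , 3 ))  # e.g. [0, 1, 2]; an empty list means no valid coloring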
| 304
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : List[str] = logging.get_logger(__name__)
_UpperCamelCase : Optional[Any] = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class CTRLConfig( PretrainedConfig):
    """simple docstring"""
    model_type = '''ctrl'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=24_65_34 , n_positions=2_56 , n_embd=12_80 , dff=81_92 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.02 , use_cache=True , **kwargs , )-> Union[str, Any]:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
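# Minimal usage sketch (class name as restored above; a smaller-than-default
# configuration, mirroring how PretrainedConfig subclasses are instantiated):
if __name__ == "__main__":
    small_config = CTRLConfig(n_embd=2_56 , n_layer=6 , n_head=8 )
    print(small_config.n_embd , small_config.n_layer )  # 256 6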
| 341
|
class PrefixSum:
    """simple docstring"""
    def __init__( self , array )-> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start , end )-> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum )-> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
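    # Quick sanity check of the class above (names as restored here):
    example = PrefixSum([1, 2, 3] )
    assert example.get_sum(0 , 2 ) == 6  # 1 + 2 + 3
    assert example.contains_sum(5 )  # the subarray [2, 3] sums to 5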
| 341
| 1
|
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
"""simple docstring"""
    def __init__( self , parent , vocab_size=9_9 , batch_size=1_3 , d_model=1_6 , decoder_seq_length=7 , is_training=True , is_decoder=True , use_attention_mask=True , use_cache=False , use_labels=True , decoder_start_token_id=2 , decoder_ffn_dim=3_2 , decoder_layers=4 , decoder_attention_heads=4 , max_position_embeddings=3_0 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , scope=None , ) -> str:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past( self , config , input_ids , attention_mask , lm_labels , ) -> Optional[int]:
        '''simple docstring'''
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        past_key_values = outputs["""past_key_values"""]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )["""last_hidden_state"""]
        output_from_past = model(next_tokens , past_key_values=past_key_values )["""last_hidden_state"""]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask , lm_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
_lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
_lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
_lowerCamelCase = True
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
| 58
|
'''simple docstring'''
def naive_pattern_search( s : str , pattern : str ) -> list:
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 69
| 0
|
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowerCamelCase__ ( UpperCAmelCase_ = "laptop" )-> DataFrame:
"""simple docstring"""
UpperCamelCase = F"https://www.amazon.in/laptop/s?k={product}"
UpperCamelCase = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
UpperCamelCase = BeautifulSoup(requests.get(UpperCAmelCase_ , headers=UpperCAmelCase_ ).text )
# Initialize a Pandas dataframe with the column titles
UpperCamelCase = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
UpperCamelCase = item.ha.text
UpperCamelCase = "https://www.amazon.in/" + item.ha.a["href"]
UpperCamelCase = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
UpperCamelCase = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
UpperCamelCase = "Not available"
try:
UpperCamelCase = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
UpperCamelCase = ""
try:
UpperCamelCase = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 1_00 )
except ValueError:
UpperCamelCase = float("nan" )
except AttributeError:
pass
UpperCamelCase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
UpperCamelCase = " "
UpperCamelCase = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = """headphones"""
get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''')
| 556
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __a ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCamelCase = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase = model(UpperCAmelCase_ )["last_hidden_state"].detach()
self.assertEqual(output.shape , UpperCAmelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase_ , atol=1e-3 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
UpperCamelCase = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase = torch.Size((1, 12, 1_024) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase = model(UpperCAmelCase_ )["last_hidden_state"].detach()
self.assertEqual(output.shape , UpperCAmelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase_ , atol=1e-3 ) )
| 556
| 1
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size( features ) -> Optional[int]:
    batch_size = np.inf
    def set_batch_size(feature ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader( AbstractDatasetReader ):
'''simple docstring'''
    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs , ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["""parquet"""][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
    def read( self ) -> List[str]:
        """simple docstring"""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
'''simple docstring'''
    def __init__( self , dataset : Dataset , path_or_buf : Union[PathLike, BinaryIO] , batch_size : Optional[int] = None , **parquet_writer_kwargs , ) -> Tuple:
        """simple docstring"""
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write( self ) -> int:
        """simple docstring"""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , """wb+""" ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
    def _write( self , file_obj : BinaryIO , batch_size : int , **parquet_writer_kwargs ) -> int:
        """simple docstring"""
        written = 0
        _ = parquet_writer_kwargs.pop("""path_or_buf""" , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating parquet from Arrow format""" , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
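# Minimal usage sketch (class and method names as restored above): write a small
# in-memory dataset to a local parquet file and read it back.
if __name__ == "__main__":
    ds = Dataset.from_dict({"text": ["a", "b", "c"]} )
    ParquetDatasetWriter(ds , "demo.parquet" ).write()  # returns the number of bytes written
    reloaded = ParquetDatasetReader("demo.parquet" ).read()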
| 108
|
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset( dataset , expected_features ):
    '''simple docstring'''
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Any ):
'''simple docstring'''
snake_case_ = tmp_path / "cache"
snake_case_ = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ = TextDatasetReader(snake_case , cache_dir=snake_case , keep_in_memory=snake_case ).read()
_check_text_dataset(snake_case , snake_case )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def UpperCamelCase_( snake_case : Union[str, Any] , snake_case : Union[str, Any] , snake_case : Dict ):
'''simple docstring'''
snake_case_ = tmp_path / "cache"
snake_case_ = {"text": "string"}
snake_case_ = features.copy() if features else default_expected_features
snake_case_ = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ = TextDatasetReader(snake_case , features=snake_case , cache_dir=snake_case ).read()
_check_text_dataset(snake_case , snake_case )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def UpperCamelCase_( snake_case : Union[str, Any] , snake_case : str , snake_case : Optional[Any] ):
'''simple docstring'''
snake_case_ = tmp_path / "cache"
snake_case_ = {"text": "string"}
snake_case_ = TextDatasetReader(snake_case , cache_dir=snake_case , split=snake_case ).read()
_check_text_dataset(snake_case , snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def UpperCamelCase_( snake_case : str , snake_case : Union[str, Any] , snake_case : Dict ):
'''simple docstring'''
if issubclass(snake_case , snake_case ):
snake_case_ = text_path
elif issubclass(snake_case , snake_case ):
snake_case_ = [text_path]
snake_case_ = tmp_path / "cache"
snake_case_ = {"text": "string"}
snake_case_ = TextDatasetReader(snake_case , cache_dir=snake_case ).read()
_check_text_dataset(snake_case , snake_case )
def UpperCamelCase_( snake_case : str , snake_case : int , snake_case : Dict=("train",) ):
'''simple docstring'''
assert isinstance(snake_case , snake_case )
for split in splits:
snake_case_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def UpperCamelCase_( snake_case : List[str] , snake_case : Dict , snake_case : Tuple ):
'''simple docstring'''
snake_case_ = tmp_path / "cache"
snake_case_ = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ = TextDatasetReader({"train": text_path} , cache_dir=snake_case , keep_in_memory=snake_case ).read()
_check_text_datasetdict(snake_case , snake_case )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def UpperCamelCase_( snake_case : Union[str, Any] , snake_case : List[str] , snake_case : int ):
'''simple docstring'''
snake_case_ = tmp_path / "cache"
    # the "text" column dtype defaults to "string" unless overridden via features
snake_case_ = {"text": "string"}
snake_case_ = features.copy() if features else default_expected_features
snake_case_ = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ = TextDatasetReader({"train": text_path} , features=snake_case , cache_dir=snake_case ).read()
_check_text_datasetdict(snake_case , snake_case )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def UpperCamelCase_( snake_case : str , snake_case : Any , snake_case : Any ):
'''simple docstring'''
if split:
snake_case_ = {split: text_path}
else:
snake_case_ = "train"
snake_case_ = {"train": text_path, "test": text_path}
snake_case_ = tmp_path / "cache"
snake_case_ = {"text": "string"}
snake_case_ = TextDatasetReader(snake_case , cache_dir=snake_case ).read()
_check_text_datasetdict(snake_case , snake_case , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 400
| 0
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__ ( UpperCAmelCase__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (DDPMParallelScheduler,)
    def get_scheduler_config( self , **kwargs ) ->Union[str, Any]:
        config = {
            'num_train_timesteps': 1_0_0_0,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs )
        return config
def lowercase_ ( self : Optional[int] ) ->Tuple:
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def lowercase_ ( self : List[str] ) ->int:
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1], [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end )
    def lowercase_ ( self : Optional[Any] ) ->Tuple:
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def lowercase_ ( self : Dict ) ->Dict:
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def lowercase_ ( self : List[str] ) ->Optional[int]:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def lowercase_ ( self : Dict ) ->Dict:
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def lowercase_ ( self : str ) ->Tuple:
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def lowercase_ ( self : Dict ) ->Optional[int]:
        for t in [0, 5_0_0, 9_9_9]:
            self.check_over_forward(time_step=t )
def lowercase_ ( self : Tuple ) ->Any:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0_9_7_9 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.0_2 ) ) < 1e-5
def lowercase_ ( self : int ) ->Tuple:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1
        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec], dim=0 )
        timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1, per_sample_batch )
        residual = model(samples.flatten(0, 1 ), timesteps.flatten(0, 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1 ), samples.flatten(0, 1 ) )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def lowercase_ ( self : Optional[int] ) ->Union[str, Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def lowercase_ ( self : Any ) ->Any:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def lowercase_ ( self : Dict ) ->str:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t )
def lowercase_ ( self : Tuple ) ->str:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=timesteps )
def lowercase_ ( self : int ) ->List[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps )
def lowercase_ ( self : List[Any] ) ->Optional[int]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}', ):
            scheduler.set_timesteps(timesteps=timesteps )
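# Minimal usage sketch (assumes the diffusers API imported above): custom
# timesteps must be strictly descending, as the tests above assert.
if __name__ == "__main__":
    sched = DDPMParallelScheduler()
    sched.set_timesteps(timesteps=[1_0_0, 8_7, 5_0, 1, 0] )
    print(sched.timesteps )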
| 710
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 243
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _snake_case ( _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = (DPMSolverSinglestepScheduler,)
__lowerCAmelCase : Optional[int] = (('num_inference_steps', 25),)
    def get_scheduler_config( self , **kwargs):
        '''simple docstring'''
        config = {
            'num_train_timesteps': 10_00,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'prediction_type': 'epsilon',
            'thresholding': False,
            'sample_max_value': 1.0,
            'algorithm_type': 'dpmsolver++',
            'solver_type': 'midpoint',
            'lambda_min_clipped': -float("""inf"""),
            'variance_type': None,
        }
        config.update(**kwargs)
        return config
    def check_over_configs( self , time_step=0 , **config):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual , t , output , **kwargs).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def lowercase__ ( self):
'''simple docstring'''
pass
    def check_over_forward( self , time_step=0 , **forward_kwargs):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config):
        '''simple docstring'''
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample , t)
            sample = scheduler.step(residual , t , sample).prev_sample
        return sample
def lowercase__ ( self):
'''simple docstring'''
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample , t)
            sample = scheduler.step(residual , t , sample).prev_sample
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_5_7_4) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
        for timesteps in [25, 50, 1_00, 9_99, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)
def lowercase__ ( self):
'''simple docstring'''
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_7_9_1) < 1E-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_7_9_1) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type="""dpmsolver++""" , solver_order=order , solver_type=solver_type , )
def lowercase__ ( self):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_)
def lowercase__ ( self):
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCamelCase_ , solver_type=UpperCamelCase_ , prediction_type=UpperCamelCase_ , algorithm_type=UpperCamelCase_ , )
lowercase__ : str = self.full_loop(
solver_order=UpperCamelCase_ , solver_type=UpperCamelCase_ , prediction_type=UpperCamelCase_ , algorithm_type=UpperCamelCase_ , )
assert not torch.isnan(UpperCamelCase_).any(), "Samples have nan numbers"
def lowercase__ ( self):
'''simple docstring'''
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
def lowercase__ ( self):
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float("""inf"""))
self.check_over_configs(lambda_min_clipped=-5.1)
def lowercase__ ( self):
'''simple docstring'''
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="""learned_range""")
def lowercase__ ( self):
'''simple docstring'''
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0)
def lowercase__ ( self):
'''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_7_9_1) < 1E-3
    def lowercase__ ( self):
        '''simple docstring'''
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_2_4_8) < 1E-3
    def lowercase__ ( self):
        '''simple docstring'''
        sample = self.full_loop(prediction_type="""v_prediction""")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1_4_5_3) < 1E-3
    def lowercase__ ( self):
        '''simple docstring'''
        sample = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0_6_4_9) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample , t)
            sample = scheduler.step(residual , t , sample).prev_sample
        assert sample.dtype == torch.float16
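# Minimal usage sketch (assumes the diffusers API imported above): instantiate the
# scheduler with its default config and inspect the inference timestep schedule.
if __name__ == "__main__":
    sched = DPMSolverSinglestepScheduler(num_train_timesteps=10_00 )
    sched.set_timesteps(10 )
    print(sched.timesteps )  # 10 timesteps in descending order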
| 12
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
def __init__( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : int = 32 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 2_55 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , UpperCamelCase_ : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , UpperCamelCase_ : bool = True , UpperCamelCase_ : int=7 , UpperCamelCase_ : Dict=30 , UpperCamelCase_ : Tuple=4_00 , UpperCamelCase_ : List[Any]=3 , ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :List[str] = parent
SCREAMING_SNAKE_CASE__ :Tuple = do_resize
SCREAMING_SNAKE_CASE__ :List[Any] = size if size is not None else {'shortest_edge': 2_88}
SCREAMING_SNAKE_CASE__ :str = size_divisor
SCREAMING_SNAKE_CASE__ :Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ :Union[str, Any] = rescale_factor
SCREAMING_SNAKE_CASE__ :str = do_normalize
SCREAMING_SNAKE_CASE__ :int = do_center_crop
SCREAMING_SNAKE_CASE__ :Optional[Any] = image_mean
SCREAMING_SNAKE_CASE__ :str = image_std
SCREAMING_SNAKE_CASE__ :Optional[Any] = do_pad
SCREAMING_SNAKE_CASE__ :Tuple = batch_size
SCREAMING_SNAKE_CASE__ :List[str] = num_channels
SCREAMING_SNAKE_CASE__ :Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ :Optional[Any] = max_resolution
def __lowerCamelCase ( self : Tuple ) -> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __lowerCamelCase ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str=False ) -> Optional[int]:
if not batched:
SCREAMING_SNAKE_CASE__ :Dict = self.size['shortest_edge']
SCREAMING_SNAKE_CASE__ :List[Any] = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[int] = image.size
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :str = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ :Optional[int] = size / min(UpperCamelCase_ , UpperCamelCase_ )
if h < w:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :List[Any] = size, scale * w
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :str = scale * h, size
SCREAMING_SNAKE_CASE__ :Any = int((13_33 / 8_00) * size )
if max(UpperCamelCase_ , UpperCamelCase_ ) > max_size:
SCREAMING_SNAKE_CASE__ :Tuple = max_size / max(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = newh * scale
SCREAMING_SNAKE_CASE__ :Any = neww * scale
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :int = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[Any] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ : List[Any] = BridgeTowerImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ :List[str] = BridgeTowerImageProcessingTester(self )
@property
def __lowerCamelCase ( self : int ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase_ , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase_ , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase_ , 'size_divisor' ) )
def __lowerCamelCase ( self : Union[str, Any] ) -> List[str]:
pass
def __lowerCamelCase ( self : int ) -> Dict:
# Initialize image processor
SCREAMING_SNAKE_CASE__ :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ :str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ :Any = image_processing(UpperCamelCase_ , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :int = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : List[Any] ) -> Tuple:
# Initialize image processor
SCREAMING_SNAKE_CASE__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :int = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ :str = image_processing(UpperCamelCase_ , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Any = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : List[str] ) -> List[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ :List[Any] = image_processing(UpperCamelCase_ , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 209
| 0
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body(*x , **y ):
        """simple docstring"""
        pass
    _serve_dependencies_installed = False
_lowerCamelCase : Optional[Any] = logging.get_logger('transformers-cli/serving')
def serve_command_factory( args ):
    """simple docstring"""
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers )
class ServeModelInfoResult( BaseModel):
    infos : dict
class ServeTokenizeResult( BaseModel):
    tokens : List[str]
    tokens_ids : Optional[List[int]]
class ServeDeTokenizeResult( BaseModel):
    text : str
class ServeForwardResult( BaseModel):
    output : Any
class ServeCommand( BaseTransformersCLICommand):
@staticmethod
    def register_subcommand( parser : ArgumentParser ):
        """simple docstring"""
        serve_parser = parser.add_parser(
            '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
        serve_parser.add_argument(
            '''--task''' , type=str , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
        serve_parser.add_argument('''--host''' , type=str , default='''localhost''' , help='''Interface the server will listen on.''' )
        serve_parser.add_argument('''--port''' , type=int , default=88_88 , help='''Port the serving will listen to.''' )
        serve_parser.add_argument('''--workers''' , type=int , default=1 , help='''Number of http workers''' )
        serve_parser.add_argument('''--model''' , type=str , help='''Model\'s name or path to stored model.''' )
        serve_parser.add_argument('''--config''' , type=str , help='''Model\'s config name or path to stored model.''' )
        serve_parser.add_argument('''--tokenizer''' , type=str , help='''Tokenizer name to use.''' )
        serve_parser.add_argument(
            '''--device''' , type=int , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        serve_parser.set_defaults(func=serve_command_factory )
    def __init__( self : Optional[Any] , pipeline : Pipeline , host : str , port : int , workers : int ):
        """simple docstring"""
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                '''Using serve command requires FastAPI and uvicorn. '''
                '''Please install transformers with [serving]: pip install "transformers[serving]".'''
                '''Or install FastAPI and uvicorn separately.''' )
        else:
            logger.info(F"""Serving model over {host}:{port}""" )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '''/''' , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=['''GET'''] , ),
                    APIRoute(
                        '''/tokenize''' , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=['''POST'''] , ),
                    APIRoute(
                        '''/detokenize''' , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=['''POST'''] , ),
                    APIRoute(
                        '''/forward''' , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=['''POST'''] , ),
                ] , timeout=6_00 , )
    def run( self : Union[str, Any] ):
        """simple docstring"""
        run(self._app , host=self.host , port=self.port , workers=self.workers )
    def model_info( self : Any ):
        """simple docstring"""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def a_ ( self : Union[str, Any] , text_input : str = Body(_lowerCamelCase , embed=_lowerCamelCase ) , return_ids : bool = Body(_lowerCamelCase , embed=_lowerCamelCase ) ):
"""simple docstring"""
try:
A_ : Optional[int] = self._pipeline.tokenizer.tokenize(text_input )
if return_ids:
A_ : str = self._pipeline.tokenizer.convert_tokens_to_ids(_lowerCamelCase )
return ServeTokenizeResult(tokens=_lowerCamelCase , tokens_ids=_lowerCamelCase )
else:
return ServeTokenizeResult(tokens=_lowerCamelCase )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(_lowerCamelCase )} )
def a_ ( self : Any , tokens_ids : List[int] = Body(_lowerCamelCase , embed=_lowerCamelCase ) , skip_special_tokens : bool = Body(_lowerCamelCase , embed=_lowerCamelCase ) , cleanup_tokenization_spaces : bool = Body(_lowerCamelCase , embed=_lowerCamelCase ) , ):
"""simple docstring"""
try:
A_ : List[Any] = self._pipeline.tokenizer.decode(tokens_ids , skip_special_tokens , cleanup_tokenization_spaces )
return ServeDeTokenizeResult(model='''''' , text=_lowerCamelCase )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(_lowerCamelCase )} )
async def a_ ( self : str , _lowerCamelCase : Optional[int]=Body(_lowerCamelCase , embed=_lowerCamelCase ) ):
"""simple docstring"""
if len(_lowerCamelCase ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
A_ : Dict = self._pipeline(_lowerCamelCase )
return ServeForwardResult(output=_lowerCamelCase )
except Exception as e:
raise HTTPException(5_00 , {'''error''': str(_lowerCamelCase )} )
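For orientation, a minimal client-side sketch of how the routes registered above could be exercised once the server is running; the localhost URL, port, and JSON field names (text_input, return_ids, inputs) are assumptions read off the route and Body definitions above, not a verified contract.
import requests

# Assumes `transformers-cli serve --task feature-extraction` is listening on the default port.
BASE_URL = "http://localhost:8888"

# GET / returns the model configuration (see the model_info route above).
print(requests.get(f"{BASE_URL}/").json())

# With embed=True Body fields, the request body is a JSON object with one key per parameter.
resp = requests.post(f"{BASE_URL}/tokenize", json={"text_input": "Hello world", "return_ids": True})
print(resp.json())  # expected shape: {"tokens": [...], "tokens_ids": [...]}

# POST /forward runs the underlying pipeline on the raw inputs.
resp = requests.post(f"{BASE_URL}/forward", json={"inputs": "Hello world"})
print(resp.json())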
| 702
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def lowercase_ ( voltage , current , power ):
"""simple docstring"""
A_ : int = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
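For illustration, three calls to the helper above, one per unknown quantity; exactly one argument must be 0, and the zero quantity is solved from P = V * I:
# voltage unknown: V = P / I = 5 / 2 = 2.5
print(lowercase_(voltage=0, current=2, power=5))    # result(name='voltage', value=2.5)
# current unknown: I = P / V = 10 / 5 = 2.0
print(lowercase_(voltage=5, current=0, power=10))   # result(name='current', value=2.0)
# power unknown: P = V * I = 2 * 4 = 8.0
print(lowercase_(voltage=2, current=4, power=0))    # result(name='power', value=8.0)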
| 361
| 0
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class lowercase_ ( unittest.TestCase , __lowerCAmelCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = load_tool('text-classification' )
self.tool.setup()
_A = load_tool('text-classification' , remote=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
_A = self.tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(_UpperCAmelCase , 'positive' )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = self.remote_tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(_UpperCAmelCase , 'positive' )
def lowerCAmelCase_ ( self : List[str] ):
_A = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(_UpperCAmelCase , 'positive' )
def lowerCAmelCase_ ( self : Any ):
_A = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(_UpperCAmelCase , 'positive' )
| 7
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = StableDiffusionInstructPixaPixPipeline
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase__ ( self : str ) -> Optional[int]:
torch.manual_seed(0 )
__magic_name__: str = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
__magic_name__: Union[str, Any] = PNDMScheduler(skip_prk_steps=__snake_case )
torch.manual_seed(0 )
__magic_name__: Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__magic_name__: int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__magic_name__: Optional[int] = CLIPTextModel(__snake_case )
__magic_name__: Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__magic_name__: Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self : Optional[Any] , device : int , seed : List[Any]=0 ) -> Optional[Any]:
__magic_name__: Optional[int] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__: str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__: Any = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" )
if str(__snake_case ).startswith("""mps""" ):
__magic_name__: Optional[Any] = torch.manual_seed(__snake_case )
else:
__magic_name__: str = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__: Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: int = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: List[Any] = self.get_dummy_components()
__magic_name__: int = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: Union[str, Any] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: List[Any] = self.get_dummy_inputs(__snake_case )
__magic_name__: Tuple = sd_pipe(**__snake_case ).images
__magic_name__: Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__magic_name__: List[Any] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
__magic_name__: Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: str = self.get_dummy_components()
__magic_name__: Dict = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: str = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: Optional[Any] = self.get_dummy_inputs(__snake_case )
__magic_name__: List[Any] = """french fries"""
__magic_name__: int = sd_pipe(**__snake_case , negative_prompt=__snake_case )
__magic_name__: Dict = output.images
__magic_name__: Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__magic_name__: str = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
__magic_name__: Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: int = self.get_dummy_components()
__magic_name__: Dict = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: Dict = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: str = self.get_dummy_inputs(__snake_case )
__magic_name__: List[str] = [inputs["""prompt"""]] * 2
__magic_name__: List[str] = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
__magic_name__: Optional[int] = torch.from_numpy(__snake_case ).unsqueeze(0 ).to(__snake_case )
__magic_name__: Tuple = image / 2 + 0.5
__magic_name__: Dict = image.permute(0 , 3 , 1 , 2 )
__magic_name__: List[str] = image.repeat(2 , 1 , 1 , 1 )
__magic_name__: str = sd_pipe(**__snake_case ).images
__magic_name__: List[str] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
__magic_name__: Optional[int] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
__magic_name__: Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: Union[str, Any] = self.get_dummy_components()
__magic_name__: Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
__magic_name__: Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: Optional[int] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: List[str] = self.get_dummy_inputs(__snake_case )
__magic_name__: Tuple = sd_pipe(**__snake_case ).images
__magic_name__: Any = image[0, -3:, -3:, -1]
__magic_name__: str = [round(x , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(x ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
__magic_name__: Optional[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
__magic_name__: Tuple = self.get_dummy_components()
__magic_name__: Tuple = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: str = VaeImageProcessor(do_resize=__snake_case , do_normalize=__snake_case )
__magic_name__: Union[str, Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: Union[str, Any] = pipe(**self.get_dummy_inputs_by_type(__snake_case , input_image_type="""pt""" ) )[0]
__magic_name__: Union[str, Any] = components["""vae"""]
__magic_name__: str = self.get_dummy_inputs_by_type(__snake_case , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__magic_name__: int = vae.encode(inputs[image_param] ).latent_dist.mode()
__magic_name__: Dict = pipe(**__snake_case )[0]
__magic_name__: Optional[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(__snake_case , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : str , __snake_case : List[str]=0 ) -> Dict:
__magic_name__: Union[str, Any] = torch.manual_seed(__snake_case )
__magic_name__: Optional[int] = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
__magic_name__: Optional[Any] = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: Optional[Any] = self.get_inputs()
__magic_name__: str = pipe(**__snake_case ).images
__magic_name__: Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__magic_name__: Any = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
__magic_name__: Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case )
__magic_name__: List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: List[str] = self.get_inputs()
__magic_name__: Dict = pipe(**__snake_case ).images
__magic_name__: str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__magic_name__: Optional[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Any ) -> List[str]:
__magic_name__: str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case )
__magic_name__: int = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: Union[str, Any] = self.get_inputs()
__magic_name__: Any = pipe(**__snake_case ).images
__magic_name__: int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__magic_name__: Optional[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ ( self : int ) -> Dict:
__magic_name__: Tuple = 0
def callback_fn(step : int , timestep : int , latents : torch.FloatTensor ) -> None:
__magic_name__: Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__magic_name__: List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__magic_name__: int = latents[0, -3:, -3:, -1]
__magic_name__: Union[str, Any] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__magic_name__: Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__magic_name__: str = latents[0, -3:, -3:, -1]
__magic_name__: Optional[Any] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__magic_name__: Tuple = False
__magic_name__: List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case , torch_dtype=torch.floataa )
__magic_name__: Union[str, Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: Dict = self.get_inputs()
pipe(**__snake_case , callback=__snake_case , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCamelCase__ ( self : Tuple ) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__: List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case , torch_dtype=torch.floataa )
__magic_name__: int = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__magic_name__: Optional[int] = self.get_inputs()
__magic_name__: Any = pipe(**__snake_case )
__magic_name__: List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def lowerCamelCase__ ( self : str ) -> Optional[int]:
__magic_name__: Optional[int] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__magic_name__: Any = inputs["""image"""].resize((5_0_4, 5_0_4) )
__magic_name__: List[str] = """timbrooks/instruct-pix2pix"""
__magic_name__: Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__snake_case , safety_checker=__snake_case , )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: str = pipe(**__snake_case )
__magic_name__: Optional[int] = output.images[0]
__magic_name__: Union[str, Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
__magic_name__: Optional[Any] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
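For reference, a minimal standalone inference sketch for the pipeline these tests exercise, written against the real diffusers class name (the checkpoint is the one used in the slow tests above; the prompt, step counts, and output path are placeholders):
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
)
pipe = pipe.to("cuda")

image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
# image_guidance_scale trades faithfulness to the input image against prompt adherence.
edited = pipe(
    "turn him into a cyborg", image=image, num_inference_steps=20, image_guidance_scale=1.5
).images[0]
edited.save("edited.png")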
| 96
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """trocr"""
SCREAMING_SNAKE_CASE__ = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__(self , vocab_size=5_02_65 , d_model=10_24 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=40_96 , activation_function="gelu" , max_position_embeddings=5_12 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.02 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
UpperCamelCase__ = vocab_size
UpperCamelCase__ = d_model
UpperCamelCase__ = decoder_layers
UpperCamelCase__ = decoder_attention_heads
UpperCamelCase__ = decoder_ffn_dim
UpperCamelCase__ = activation_function
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = init_std
UpperCamelCase__ = decoder_layerdrop
UpperCamelCase__ = use_cache
UpperCamelCase__ = scale_embedding
UpperCamelCase__ = use_learned_position_embeddings
UpperCamelCase__ = layernorm_embedding
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
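A small usage sketch for this configuration class, written against the real transformers name TrOCRConfig (the class above is the obfuscated copy); the override values here are arbitrary:
from transformers import TrOCRConfig

config = TrOCRConfig()  # defaults mirror the signature above
small = TrOCRConfig(decoder_layers=6, d_model=512, decoder_attention_heads=8)
# attribute_map aliases the generic names onto the decoder fields:
print(small.num_hidden_layers)  # 6
print(small.hidden_size)        # 512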
| 707
|
class __A:
"""simple docstring"""
def __init__(self , graph , sources , sinks ):
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = graph
self._normalize_graph(sources , sinks )
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = None
def UpperCAmelCase_ (self , sources , sinks ):
if isinstance(sources , int ):
UpperCamelCase__ = [sources]
if isinstance(sinks , int ):
UpperCamelCase__ = [sinks]
if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0:
return
UpperCamelCase__ = sources[0]
UpperCamelCase__ = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE_ ) > 1 or len(SCREAMING_SNAKE_CASE_ ) > 1:
UpperCamelCase__ = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
UpperCamelCase__ = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
UpperCamelCase__ = max_input_flow
UpperCamelCase__ = 0
UpperCamelCase__ = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
UpperCamelCase__ = max_input_flow
UpperCamelCase__ = size - 1
def UpperCAmelCase_ (self ):
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def UpperCAmelCase_ (self , algorithm ):
UpperCamelCase__ = algorithm(self )
class __A:
"""simple docstring"""
def __init__(self , flow_network ):
UpperCamelCase__ = flow_network
UpperCamelCase__ = flow_network.verticesCount
UpperCamelCase__ = flow_network.sourceIndex
UpperCamelCase__ = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
UpperCamelCase__ = flow_network.graph
UpperCamelCase__ = False
def UpperCAmelCase_ (self ):
if not self.executed:
self._algorithm()
UpperCamelCase__ = True
def UpperCAmelCase_ (self ):
pass
class __A( __lowerCamelCase ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ ):
super().__init__(SCREAMING_SNAKE_CASE_ )
# use this to save your result
UpperCamelCase__ = -1
def UpperCAmelCase_ (self ):
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class __A( __lowerCamelCase ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ ):
super().__init__(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [[0] * self.verticies_count for i in range(self.verticies_count )]
UpperCamelCase__ = [0] * self.verticies_count
UpperCamelCase__ = [0] * self.verticies_count
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
UpperCamelCase__ = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
UpperCamelCase__ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = vertices_list[i]
UpperCamelCase__ = self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE_ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = 0
else:
i += 1
UpperCamelCase__ = sum(self.preflow[self.source_index] )
def UpperCAmelCase_ (self , vertex_index ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.relabel(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , from_index , to_index ):
UpperCamelCase__ = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def UpperCAmelCase_ (self , vertex_index ):
UpperCamelCase__ = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
UpperCamelCase__ = self.heights[to_index]
if min_height is not None:
UpperCamelCase__ = min_height + 1
if __name__ == "__main__":
lowerCamelCase_ = [0]
lowerCamelCase_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowerCamelCase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowerCamelCase_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowerCamelCase_ = flow_network.find_maximum_flow()
print(f'maximum flow is {maximum_flow}')
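As an independent cross-check on the push-relabel result, here is a small self-contained Edmonds-Karp (BFS augmenting path) sketch run on the same capacity matrix; the only augmenting path is 0 -> 1 -> 2 -> 3 with bottleneck 6, so both algorithms should report 6:
from collections import deque

def edmonds_karp(capacity, source, sink):
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        # BFS for an augmenting path in the residual graph.
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:
            return max_flow
        # Find the bottleneck along the path, then augment flow along it.
        bottleneck = float("inf")
        v = sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        max_flow += bottleneck

print(edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3))  # 6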
| 86
| 0
|
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =ComputeEnvironment.AMAZON_SAGEMAKER
SCREAMING_SNAKE_CASE_ =True
SCREAMING_SNAKE_CASE_ ='''ml.p3.2xlarge'''
SCREAMING_SNAKE_CASE_ ='''accelerate_sagemaker_execution_role'''
SCREAMING_SNAKE_CASE_ ='''hf-sm'''
SCREAMING_SNAKE_CASE_ ='''us-east-1'''
SCREAMING_SNAKE_CASE_ =1
SCREAMING_SNAKE_CASE_ ='''accelerate-sagemaker-1'''
SCREAMING_SNAKE_CASE_ ='''1.6'''
SCREAMING_SNAKE_CASE_ ='''4.4'''
SCREAMING_SNAKE_CASE_ ='''train.py'''
SCREAMING_SNAKE_CASE_ =[
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
SCREAMING_SNAKE_CASE_ =[
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
'''simple docstring'''
# If no defaults are changed, `to_kwargs` returns an empty dict.
UpperCAmelCase__ : List[Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["model_name_or_path"] , snake_case__ )
assert isinstance(converted_args["do_train"] , snake_case__ )
assert isinstance(converted_args["epochs"] , snake_case__ )
assert isinstance(converted_args["learning_rate"] , snake_case__ )
assert isinstance(converted_args["max_steps"] , snake_case__ )
with pytest.raises(snake_case__ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
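The helper under test converts a flat nargs-style token list into a typed dict; a minimal illustrative re-implementation of the idea (not the accelerate code) could look like this:
def convert_nargs_to_dict(tokens):
    # Pair each "--key" with the following value; a bare flag becomes True.
    result = {}
    i = 0
    while i < len(tokens):
        if not tokens[i].startswith("--"):
            raise ValueError(f"expected an option, got {tokens[i]!r}")
        key = tokens[i][2:]
        if i + 1 < len(tokens) and not tokens[i + 1].startswith("--"):
            raw, i = tokens[i + 1], i + 2
        else:
            raw, i = "True", i + 1
        # Best-effort typing: bool, then int, then float, else str.
        if raw in ("True", "False"):
            result[key] = raw == "True"
            continue
        for cast in (int, float):
            try:
                result[key] = cast(raw)
                break
            except ValueError:
                continue
        else:
            result[key] = raw
    return result

print(convert_nargs_to_dict(
    ["--model_name_or_path", "bert", "--do_train", "False", "--epochs", "3", "--max_steps", "50.5"]
))
# {'model_name_or_path': 'bert', 'do_train': False, 'epochs': 3, 'max_steps': 50.5}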
| 438
|
"""simple docstring"""
import numpy as np
def SCREAMING_SNAKE_CASE__ ( input_matrix : np.ndarray , vector : np.ndarray , error_tol : float = 1E-1_2 , max_iterations : int = 100 , )-> tuple[float, np.ndarray]:
'''simple docstring'''
assert np.shape(snake_case )[0] == np.shape(snake_case )[1]
# Ensure proper dimensionality.
assert np.shape(snake_case )[0] == np.shape(snake_case )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(snake_case ) == np.iscomplexobj(snake_case )
UpperCAmelCase__ : str = np.iscomplexobj(snake_case )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(snake_case , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : List[str] = 1E1_2
while not convergence:
# Multiple matrix by the vector.
UpperCAmelCase__ : Optional[Any] = np.dot(snake_case , snake_case )
# Normalize the resulting output vector.
UpperCAmelCase__ : Tuple = w / np.linalg.norm(snake_case )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
UpperCAmelCase__ : Any = vector.conj().T if is_complex else vector.T
UpperCAmelCase__ : List[str] = np.dot(snake_case , np.dot(snake_case , snake_case ) )
# Check convergence.
UpperCAmelCase__ : Optional[Any] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Tuple = lambda_
if is_complex:
UpperCAmelCase__ : str = np.real(lambda_ )
return lambda_, vector
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
UpperCAmelCase__ : Dict = np.array([41, 4, 20] )
UpperCAmelCase__ : Union[str, Any] = real_input_matrix.astype(np.complexaaa )
UpperCAmelCase__ : Optional[int] = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
UpperCAmelCase__ : int = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
UpperCAmelCase__ : List[str] = real_input_matrix
UpperCAmelCase__ : Any = real_vector
elif problem_type == "complex":
UpperCAmelCase__ : str = complex_input_matrix
UpperCAmelCase__ : List[Any] = complex_vector
# Our implementation.
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = power_iteration(snake_case , snake_case )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = np.linalg.eigh(snake_case )
# Last eigenvalue is the maximum one.
UpperCAmelCase__ : str = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
UpperCAmelCase__ : Optional[int] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(snake_case ) - np.abs(snake_case ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
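A quick standalone check of the power-iteration idea on a 2x2 symmetric matrix whose dominant eigenvalue is known in closed form:
import numpy as np

# Eigenvalues of [[2, 1], [1, 2]] are 3 and 1, so iteration should converge to 3.
A = np.array([[2.0, 1.0], [1.0, 2.0]])
v = np.array([1.0, 0.0])
for _ in range(50):
    v = A @ v
    v = v / np.linalg.norm(v)
eigenvalue = v @ A @ v  # Rayleigh quotient of the (unit-norm) converged vector
print(round(float(eigenvalue), 6))  # ~3.0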
| 438
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
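The _LazyModule used above defers the heavy submodule imports until an attribute is first accessed; a minimal sketch of that pattern with importlib (an illustration, not the transformers implementation) is:
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve exported names on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute back to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache: later lookups skip __getattr__
        return value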
| 395
|
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def A_ ( base_model_name , hf_config , downstream_dict ):
UpperCamelCase_ : List[Any] =UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
UpperCamelCase_ : str =downstream_dict['projector.weight']
UpperCamelCase_ : Dict =downstream_dict['projector.bias']
UpperCamelCase_ : Optional[Any] =downstream_dict['model.post_net.linear.weight']
UpperCamelCase_ : Optional[int] =downstream_dict['model.post_net.linear.bias']
return model
def A_ ( base_model_name , hf_config , downstream_dict ):
UpperCamelCase_ : Optional[int] =UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
UpperCamelCase_ : Any =downstream_dict['model.linear.weight']
UpperCamelCase_ : str =downstream_dict['model.linear.bias']
return model
def A_ ( base_model_name , hf_config , downstream_dict ):
UpperCamelCase_ : str =UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config )
UpperCamelCase_ : Optional[int] =downstream_dict['connector.weight']
UpperCamelCase_ : str =downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
UpperCamelCase_ : str =downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
UpperCamelCase_ : str =downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
UpperCamelCase_ : List[str] =downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
UpperCamelCase_ : str =downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
UpperCamelCase_ : int =downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
UpperCamelCase_ : List[str] =downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
UpperCamelCase_ : int =downstream_dict['objective.W']
return model
@torch.no_grad()
def A_ ( base_model_name , config_path , checkpoint_path , model_dump_path ):
UpperCamelCase_ : int =torch.load(checkpoint_path , map_location='cpu' )
UpperCamelCase_ : Optional[int] =checkpoint['Downstream']
UpperCamelCase_ : Union[str, Any] =UniSpeechSatConfig.from_pretrained(config_path )
UpperCamelCase_ : Any =WavaVecaFeatureExtractor.from_pretrained(
base_model_name , return_attention_mask=True , do_normalize=False )
UpperCamelCase_ : Union[str, Any] =hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
UpperCamelCase_ : int =convert_classification(__lowercase , __lowercase , __lowercase )
elif arch.endswith('ForAudioFrameClassification' ):
UpperCamelCase_ : List[Any] =convert_diarization(__lowercase , __lowercase , __lowercase )
elif arch.endswith('ForXVector' ):
UpperCamelCase_ : Optional[Any] =convert_xvector(__lowercase , __lowercase , __lowercase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
UpperCamelCase_ : Tuple =checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
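For orientation, the conversion script above would typically be invoked along these lines; the script filename and all paths are placeholders:
# python convert_unispeech_sat_s3prl_checkpoint.py \
#     --base_model_name microsoft/unispeech-sat-base \
#     --config_path ./classifier_config.json \
#     --checkpoint_path ./s3prl_checkpoint.ckpt \
#     --model_dump_path ./converted_model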
| 395
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : Any = logging.get_logger(__name__)
__magic_name__ : Tuple = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
UpperCAmelCase__ : Any = '''decision_transformer'''
UpperCAmelCase__ : Tuple = ['''past_key_values''']
UpperCAmelCase__ : int = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4_096 , action_tanh=True , vocab_size=1 , n_positions=1_024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
_snake_case = state_dim
_snake_case = act_dim
_snake_case = hidden_size
_snake_case = max_ep_len
_snake_case = action_tanh
_snake_case = vocab_size
_snake_case = n_positions
_snake_case = n_layer
_snake_case = n_head
_snake_case = n_inner
_snake_case = activation_function
_snake_case = resid_pdrop
_snake_case = embd_pdrop
_snake_case = attn_pdrop
_snake_case = layer_norm_epsilon
_snake_case = initializer_range
_snake_case = scale_attn_weights
_snake_case = use_cache
_snake_case = scale_attn_by_inverse_layer_idx
_snake_case = reorder_and_upcast_attn
_snake_case = bos_token_id
_snake_case = eos_token_id
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
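As a usage sketch, the config can be instantiated directly via the real transformers name DecisionTransformerConfig; Hopper's observation and action dimensions are shown, the rest keeps the defaults above:
from transformers import DecisionTransformerConfig

config = DecisionTransformerConfig(state_dim=11, act_dim=3)  # gym Hopper: 11-dim obs, 3-dim action
print(config.hidden_size, config.n_layer, config.n_head)     # 128 3 1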
| 672
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Dict = XGLMTokenizer
A__ : Optional[int] = XGLMTokenizerFast
A__ : int = True
A__ : Optional[Any] = True
def snake_case_ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase : Optional[Any] = XGLMTokenizer(_snake_case , keep_accents=_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self : List[Any] ):
__lowercase : int = '''<pad>'''
__lowercase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def snake_case_ ( self : Dict ):
__lowercase : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(_snake_case ) , 1008 )
def snake_case_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def snake_case_ ( self : Dict ):
__lowercase : List[str] = XGLMTokenizer(_snake_case , keep_accents=_snake_case )
__lowercase : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowercase : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowercase : Tuple = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def snake_case_ ( self : List[str] ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def snake_case_ ( self : Any ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_snake_case , f.name )
__lowercase : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=_snake_case )
__lowercase : List[str] = pickle.dumps(_snake_case )
pickle.loads(_snake_case )
def snake_case_ ( self : str ):
if not self.test_rust_tokenizer:
return
__lowercase : Tuple = self.get_tokenizer()
__lowercase : Optional[int] = self.get_rust_tokenizer()
__lowercase : Dict = '''I was born in 92000, and this is falsé.'''
__lowercase : int = tokenizer.tokenize(_snake_case )
__lowercase : int = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowercase : Dict = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
__lowercase : Tuple = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowercase : Any = self.get_rust_tokenizer()
__lowercase : List[str] = tokenizer.encode(_snake_case )
__lowercase : Union[str, Any] = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
@slow
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Optional[Any] = '''Hello World!'''
__lowercase : int = [2, 3_1227, 4447, 35]
self.assertListEqual(_snake_case , self.big_tokenizer.encode(_snake_case ) )
@slow
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Optional[int] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
__lowercase : Any = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(_snake_case , self.big_tokenizer.encode(_snake_case ) )
@slow
def snake_case_ ( self : Union[str, Any] ):
# fmt: off
__lowercase : Optional[Any] = {
'''input_ids''': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/xglm-564M''' , padding=_snake_case , )
| 509
| 0
|
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ = concatenate_datasets
A_ = DownloadConfig
A_ = DownloadManager
A_ = DownloadMode
A_ = DownloadConfig
A_ = DownloadMode
A_ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 384
|
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_="" , SCREAMING_SNAKE_CASE_="train" ) -> List[Any]:
'''simple docstring'''
assert os.path.isdir(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = []
lowerCamelCase_ = os.listdir(SCREAMING_SNAKE_CASE_ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowerCamelCase_ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not os.path.isfile(SCREAMING_SNAKE_CASE_ ):
continue
self.documents.append(SCREAMING_SNAKE_CASE_ )
def __len__( self ) -> List[str]:
'''simple docstring'''
return len(self.documents )
def __getitem__( self , idx ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.documents[idx]
lowerCamelCase_ = document_path.split('/' )[-1]
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as source:
lowerCamelCase_ = source.read()
lowerCamelCase_ ,lowerCamelCase_ = process_story(SCREAMING_SNAKE_CASE_ )
return document_name, story_lines, summary_lines
def _UpperCamelCase ( raw_story ) -> Union[str, Any]:
lowerCamelCase_ = list(filter(lambda __UpperCamelCase : len(__UpperCamelCase ) != 0 ,[line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
lowerCamelCase_ = [_add_missing_period(line ) for line in nonempty_lines]
# gather article lines
lowerCamelCase_ = []
lowerCamelCase_ = deque(__UpperCamelCase )
while True:
try:
lowerCamelCase_ = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(__UpperCamelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
lowerCamelCase_ = list(filter(lambda t : not t.startswith('@highlight' ) ,lines ) )
return story_lines, summary_lines
def _UpperCamelCase ( __UpperCamelCase ) -> Optional[int]:
lowerCamelCase_ = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u201d', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def _UpperCamelCase ( sequence ,block_size ,pad_token_id ) -> Union[str, Any]:
if len(sequence ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(sequence )) )
return sequence
def _UpperCamelCase ( sequence ,pad_token_id ) -> List[Any]:
lowerCamelCase_ = torch.ones_like(sequence )
lowerCamelCase_ = sequence == pad_token_id
lowerCamelCase_ = 0
return mask
def _UpperCamelCase ( story_lines ,summary_lines ,tokenizer ) -> Tuple:
lowerCamelCase_ = [tokenizer.encode(line ) for line in story_lines]
lowerCamelCase_ = [token for sentence in story_lines_token_ids for token in sentence]
lowerCamelCase_ = [tokenizer.encode(line ) for line in summary_lines]
lowerCamelCase_ = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def _UpperCamelCase ( batch ,separator_token_id ) -> Optional[int]:
lowerCamelCase_ = []
for sequence in batch:
lowerCamelCase_ = -1
lowerCamelCase_ = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(__UpperCamelCase )
return torch.tensor(__UpperCamelCase )
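To make the story-processing helpers concrete, a tiny in-memory run of the first helper (named process_story upstream; the obfuscated copy reuses one name for every function, so the upstream name is used here):
raw_story = (
    "The first article sentence\n"
    "The second article sentence.\n"
    "@highlight\n"
    "A one-line summary\n"
)
story_lines, summary_lines = process_story(raw_story)
print(story_lines)    # ['The first article sentence.', 'The second article sentence.']
print(summary_lines)  # ['A one-line summary.']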
| 384
| 1
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __a ( class_prompt, class_data_dir, num_class_images ):
"""simple docstring"""
_a = 1.5
_a = int(factor * num_class_images )
_a = ClipClient(
url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=a, aesthetic_weight=0.1 )
os.makedirs(F'{class_data_dir}/images', exist_ok=a )
if len(list(Path(F'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
_a = client.query(text=class_prompt )
if len(a ) >= factor * num_class_images or num_images > 1e4:
break
else:
_a = int(factor * num_images )
_a = ClipClient(
url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=a, aesthetic_weight=0.1, )
_a = 0
_a = 0
_a = tqdm(desc="downloading real regularization images", total=a )
with open(F'{class_data_dir}/caption.txt', "w" ) as fa, open(F'{class_data_dir}/urls.txt', "w" ) as fa, open(
F'{class_data_dir}/images.txt', "w" ) as fa:
while total < num_class_images:
_a = class_images[count]
count += 1
try:
_a = requests.get(images["url"] )
if img.status_code == 2_0_0:
_a = Image.open(BytesIO(img.content ) )
with open(F'{class_data_dir}/images/{total}.jpg', "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(F'{class_data_dir}/images/{total}.jpg' + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __a ( ):
"""simple docstring"""
_a = argparse.ArgumentParser("", add_help=a )
parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=a, type=a )
parser.add_argument("--class_data_dir", help="path to save images", required=a, type=a )
parser.add_argument("--num_class_images", help="number of images to download", default=2_0_0, type=a )
return parser.parse_args()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
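The retrieval script above is command-line driven; a representative invocation (script name, prompt, and paths are placeholders) would be:
# python retrieve_class_images.py \
#     --class_prompt "a photo of a dog" \
#     --class_data_dir ./class_images \
#     --num_class_images 200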
| 388
|
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase_ : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_a = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1E-05, "token": 38_015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1E-05, "token": 25_506, "token_str": " accuser"},
] , )
_a = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1E-05,
"token": 38_015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1E-05,
"token": 25_506,
"token_str": " accuser",
},
] , )
_a = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2E-05, "token": 13_606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2E-05, "token": 3_499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9E-05, "token": 2_941, "token_str": " Te"},
] , )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
_a = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_a = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2E-05, "token": 35_676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2E-05, "token": 16_416, "token_str": "ELS"},
] , )
_a = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2E-05,
"token": 35_676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2E-05, "token": 16_416, "token_str": "ELS"},
] , )
_a = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1E-05, "token": 3_499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2E-05, "token": 2_941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2E-05, "token": 13_606, "token_str": " Clara"},
] , )
_a = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
[
{
"score": 2.2E-05,
"token": 35_676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2E-05, "token": 16_416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2E-05,
"token": 35_676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2E-05, "token": 16_416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )

        # convert model to fp16
        pipe.model.half()

        outputs = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
        self.assertIsInstance(outputs , list )
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )

        self.run_large_test(unmasker )
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )

        self.run_large_test(unmasker )
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>" )
        self.assertEqual(
            nested_simplify(outputs ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1_573, "token_str": " Chris"},
] , )
_a = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2_201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12_790,
"token_str": " Lyon",
},
] , )
_a = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3_499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13_606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2_941, "token_str": " Te"},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )

        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # Inputs that contain no mask_token are not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]

        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
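# Illustrative usage of the pipeline under test. This is a minimal sketch:
# "sshleifer/tiny-distilroberta-base" is the tiny checkpoint used above, so its
# predictions are meaningless; swap in a real checkpoint such as
# "distilroberta-base" for sensible output.
if __name__ == "__main__":
    unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
    # Each prediction is a dict with "sequence", "score", "token" and "token_str",
    # the exact structure asserted throughout the tests above.
    for prediction in unmasker("My name is <mask>"):
        print(prediction["sequence"], prediction["score"])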
"""simple docstring"""
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
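# A minimal sketch of the migration this shim encourages: constructing the legacy
# class still works but emits a FutureWarning, and YolosImageProcessor is the
# drop-in replacement. Assumes the public `transformers` package is installed.
if __name__ == "__main__":
    import warnings as _warnings

    from transformers import YolosFeatureExtractor, YolosImageProcessor

    with _warnings.catch_warnings(record=True) as caught:
        _warnings.simplefilter("always")
        YolosFeatureExtractor()  # triggers the FutureWarning above
        print(any(issubclass(w.category, FutureWarning) for w in caught))  # True

    image_processor = YolosImageProcessor()  # preferred, warning-free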
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False) -> None:
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
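# A minimal sketch of driving the ONNX config above through the transformers.onnx
# export helper. The tiny checkpoint name is an assumption for illustration; any
# GPT-J checkpoint works the same way.
if __name__ == "__main__":
    from pathlib import Path

    from transformers import AutoConfig, AutoModel, AutoTokenizer
    from transformers.onnx import export

    checkpoint = "hf-internal-testing/tiny-random-gptj"  # assumed tiny test checkpoint
    onnx_config = GPTJOnnxConfig(AutoConfig.from_pretrained(checkpoint))
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModel.from_pretrained(checkpoint)

    # export() runs generate_dummy_inputs() above to trace the model
    export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("gptj.onnx"))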
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, activation_dropout=0.0, attention_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
_lowerCAmelCase : Dict = self._generate_dummy_inputs_for_encoder_and_decoder(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
# Generate decoder inputs
_lowerCAmelCase : Union[str, Any] = seq_length if not self.use_past else 1
_lowerCAmelCase : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
_lowerCAmelCase : Union[str, Any] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase : Union[str, Any] = dict(**_snake_case , **_snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : Tuple = common_inputs["input_ids"].shape
_lowerCAmelCase : List[str] = common_inputs["decoder_input_ids"].shape[1]
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.num_attention_heads
_lowerCAmelCase : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase : Dict = decoder_seq_length + 3
_lowerCAmelCase : Any = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase : Union[str, Any] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_snake_case , _snake_case )] , dim=1 )
_lowerCAmelCase : Any = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase , _lowerCAmelCase : Dict = self.num_layers
_lowerCAmelCase : Union[str, Any] = min(_snake_case , _snake_case )
_lowerCAmelCase : List[Any] = max(_snake_case , _snake_case ) - min_num_layers
_lowerCAmelCase : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_snake_case ):
common_inputs["past_key_values"].append(
(
torch.zeros(_snake_case ),
torch.zeros(_snake_case ),
torch.zeros(_snake_case ),
torch.zeros(_snake_case ),
) )
# TODO: test this.
_lowerCAmelCase : int = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_snake_case , _snake_case ):
common_inputs["past_key_values"].append((torch.zeros(_snake_case ), torch.zeros(_snake_case )) )
return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
_lowerCAmelCase : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : int = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Union[str, Any] = seqlen + 2
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.num_layers
_lowerCAmelCase , _lowerCAmelCase : Any = self.num_attention_heads
_lowerCAmelCase : List[str] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase : Union[str, Any] = common_inputs["attention_mask"].dtype
_lowerCAmelCase : Union[str, Any] = torch.cat(
[common_inputs["attention_mask"], torch.ones(_snake_case , _snake_case , dtype=_snake_case )] , dim=1 )
_lowerCAmelCase : Tuple = [
(torch.zeros(_snake_case ), torch.zeros(_snake_case )) for _ in range(_snake_case )
]
return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase : Union[str, Any] = compute_effective_axis_dimension(
_snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase : int = tokenizer.num_special_tokens_to_add(_snake_case )
_lowerCAmelCase : Optional[Any] = compute_effective_axis_dimension(
_snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_snake_case )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase : Any = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase : Union[str, Any] = dict(tokenizer(_snake_case , return_tensors=_snake_case ) )
return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
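# A minimal sketch showing what the dummy-input machinery above produces for a
# seq2seq checkpoint. The import path and checkpoint are assumptions based on the
# upstream transformers package; any Marian model from the archive map works.
if __name__ == "__main__":
    from transformers import AutoConfig, AutoTokenizer
    from transformers.models.marian.configuration_marian import MarianOnnxConfig

    checkpoint = "Helsinki-NLP/opus-mt-en-de"
    onnx_config = MarianOnnxConfig(AutoConfig.from_pretrained(checkpoint), task="seq2seq-lm")
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    dummy_inputs = onnx_config.generate_dummy_inputs(
        tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
    )
    print(sorted(dummy_inputs))  # input_ids, attention_mask, decoder_input_ids, decoder_attention_mask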
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
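# A minimal sketch of the behavior covered above: Dataset.from_list infers the
# schema from the first record and fills columns missing from later records with None.
if __name__ == "__main__":
    records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2}]
    dset = Dataset.from_list(records)
    print(dset.column_names)  # ['col_1', 'col_2']
    print(dset[1])            # {'col_1': 2, 'col_2': None}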
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()
return config, input_ids, input_mask, token_labels
    def get_config(self):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=_A , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1_024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50_358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
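# A minimal sketch of combining the encoder and decoder tested above into a
# seq2seq model, following the documented BertGeneration recipe. The
# "bert-large-uncased" checkpoint and the BOS/EOS token ids come from that
# recipe and are assumptions here; the download is large.
if __name__ == "__main__":
    from transformers import BertTokenizer, EncoderDecoderModel

    encoder = BertGenerationEncoder.from_pretrained("bert-large-uncased", bos_token_id=101, eos_token_id=102)
    # add cross-attention layers and use BERT's [CLS] as BOS and [SEP] as EOS
    decoder = BertGenerationDecoder.from_pretrained(
        "bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102
    )
    bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder)

    tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
    input_ids = tokenizer("This is a long article to summarize", return_tensors="pt").input_ids
    print(tokenizer.decode(bert2bert.generate(input_ids)[0]))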
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    r"""
    Constructs a MarkupLM feature extractor. This can be used to get a list of nodes and corresponding xpaths from
    HTML strings.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
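# A minimal sketch of calling the feature extractor above on raw HTML: it returns
# each text node together with its xpath, ready for a MarkupLM processor. The
# expected outputs in the comments are approximate illustrations.
if __name__ == "__main__":
    html_string = "<html><body><h1>Title</h1><p>Hello <b>world</b></p></body></html>"
    feature_extractor = MarkupLMFeatureExtractor()
    encoding = feature_extractor(html_string)
    print(encoding["nodes"])   # e.g. [['Title', 'Hello', 'world']]
    print(encoding["xpaths"])  # e.g. [['/html/body/h1', '/html/body/p', '/html/body/p/b']]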
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """
    Bezier curve is a weighted sum of a set of control points. This implementation
    works only for 2d coordinates in the xy plane.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
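# A quick numeric check to complement the plots above: at t=0 and t=1 the curve
# must pass through the first and last control points.
if __name__ == "__main__":
    curve = BezierCurve([(1, 1), (3, 5), (6, 0)])
    assert curve.bezier_curve_function(0) == (1.0, 1.0)
    assert curve.bezier_curve_function(1) == (6.0, 0.0)
    print(curve.bezier_curve_function(0.5))  # point halfway along the quadratic curve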
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
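    # A small worked example (SI units assumed): pass 0 for the unknown quantity
    # and the function returns its name and value.
    name, value = electric_conductivity(conductivity=1000.0, electron_conc=1e20, mobility=0)
    print(name, value)  # mobility ≈ 62.42  (= 1000 / (1e20 * 1.6021e-19))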
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
# handle various types of models
_UpperCAmelCase : Optional[Any] = basename(lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = dirname(lowerCAmelCase )
_UpperCAmelCase : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
_UpperCAmelCase : Optional[int] = cls.hub_models()
_UpperCAmelCase : Dict = {"bpe": "fastbpe", "tokenizer": "moses"}
_UpperCAmelCase : Tuple = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'using checkpoint {checkpoint_file}' )
_UpperCAmelCase : int = hub_utils.from_pretrained(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , archive_map=lowerCAmelCase , **lowerCAmelCase )
_UpperCAmelCase : Any = vars(chkpt["args"]["model"] )
_UpperCAmelCase : Optional[int] = args["source_lang"]
_UpperCAmelCase : Dict = args["target_lang"]
_UpperCAmelCase : int = dirname(lowerCAmelCase )
_UpperCAmelCase : str = basename(lowerCAmelCase )
# dicts
_UpperCAmelCase : List[str] = os.path.join(lowerCAmelCase , F'dict.{src_lang}.txt' )
_UpperCAmelCase : Dict = os.path.join(lowerCAmelCase , F'dict.{tgt_lang}.txt' )
_UpperCAmelCase : Union[str, Any] = Dictionary.load(lowerCAmelCase )
_UpperCAmelCase : Any = rewrite_dict_keys(src_dict.indices )
_UpperCAmelCase : int = len(lowerCAmelCase )
_UpperCAmelCase : List[Any] = os.path.join(lowerCAmelCase , "vocab-src.json" )
print(F'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase , ensure_ascii=lowerCAmelCase , indent=lowerCAmelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
_UpperCAmelCase : Union[str, Any] = True
for k in src_vocab.keys():
if not k.islower():
_UpperCAmelCase : Optional[int] = False
break
_UpperCAmelCase : Dict = Dictionary.load(lowerCAmelCase )
_UpperCAmelCase : List[str] = rewrite_dict_keys(tgt_dict.indices )
_UpperCAmelCase : Union[str, Any] = len(lowerCAmelCase )
_UpperCAmelCase : Dict = os.path.join(lowerCAmelCase , "vocab-tgt.json" )
print(F'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase , ensure_ascii=lowerCAmelCase , indent=lowerCAmelCase ) )
# merges_file (bpecodes)
_UpperCAmelCase : int = os.path.join(lowerCAmelCase , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
_UpperCAmelCase : Tuple = os.path.join(lowerCAmelCase , lowerCAmelCase )
if os.path.exists(lowerCAmelCase ):
break
with open(lowerCAmelCase , encoding="utf-8" ) as fin:
_UpperCAmelCase : List[str] = fin.read()
_UpperCAmelCase : str = re.sub(R" \d+$" , "" , lowerCAmelCase , 0 , re.M ) # remove frequency number
print(F'Generating {merges_file}' )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as fout:
fout.write(lowerCAmelCase )
# model config
_UpperCAmelCase : int = os.path.join(lowerCAmelCase , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", F'need to extend tokenizer to support bpe={args["tokenizer"]}'
_UpperCAmelCase : Union[str, Any] = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
_UpperCAmelCase : Union[str, Any] = 5
_UpperCAmelCase : str = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
_UpperCAmelCase : str = best_score_hparams[model_dir]["length_penalty"]
else:
_UpperCAmelCase : str = 1.0
print(F'Generating {fsmt_model_config_file}' )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase , ensure_ascii=lowerCAmelCase , indent=lowerCAmelCase ) )
# tokenizer config
_UpperCAmelCase : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
_UpperCAmelCase : str = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1024,
"do_lower_case": do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}' )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase , ensure_ascii=lowerCAmelCase , indent=lowerCAmelCase ) )
# model
_UpperCAmelCase : Optional[int] = chkpt["models"][0]
_UpperCAmelCase : int = model.state_dict()
# rename keys to start with 'model.'
_UpperCAmelCase : Union[str, Any] = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
_UpperCAmelCase : Any = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase , lowerCAmelCase )
_UpperCAmelCase : List[Any] = FSMTConfig.from_pretrained(lowerCAmelCase )
_UpperCAmelCase : List[Any] = FSMTForConditionalGeneration(lowerCAmelCase )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
# save
_UpperCAmelCase : List[str] = os.path.join(lowerCAmelCase , lowerCAmelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCAmelCase , lowerCAmelCase )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F'cd {data_root}' )
print(F'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
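# Example invocation (script name and paths are illustrative); the checkpoint directory must also
# contain the fairseq dicts and bpecodes consumed above:
#   python convert_fsmt_checkpoint.py \
#       --fsmt_checkpoint_path ./wmt19.ru-en/model.pt \
#       --pytorch_dump_folder_path ./fsmt-wmt19-ru-en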
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
A_ = logging.get_logger(__name__)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: int ):
UpperCamelCase_ =question_encoder
UpperCamelCase_ =generator
UpperCamelCase_ =self.question_encoder
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: str ):
if os.path.isfile(UpperCamelCase_ ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
UpperCamelCase_ =os.path.join(UpperCamelCase_ , "question_encoder_tokenizer" )
UpperCamelCase_ =os.path.join(UpperCamelCase_ , "generator_tokenizer" )
self.question_encoder.save_pretrained(UpperCamelCase_ )
self.generator.save_pretrained(UpperCamelCase_ )
@classmethod
def UpperCamelCase__ ( cls: Any , UpperCamelCase_: Dict , **UpperCamelCase_: Dict ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
UpperCamelCase_ =kwargs.pop("config" , UpperCamelCase_ )
if config is None:
UpperCamelCase_ =RagConfig.from_pretrained(UpperCamelCase_ )
UpperCamelCase_ =AutoTokenizer.from_pretrained(
UpperCamelCase_ , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
UpperCamelCase_ =AutoTokenizer.from_pretrained(
UpperCamelCase_ , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=UpperCamelCase_ , generator=UpperCamelCase_ )
def __call__( self: Dict , *UpperCamelCase_: int , **UpperCamelCase_: int ):
return self.current_tokenizer(*UpperCamelCase_ , **UpperCamelCase_ )
def UpperCamelCase__ ( self: Optional[Any] , *UpperCamelCase_: str , **UpperCamelCase_: Tuple ):
return self.generator.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def UpperCamelCase__ ( self: int , *UpperCamelCase_: Optional[int] , **UpperCamelCase_: Union[str, Any] ):
return self.generator.decode(*UpperCamelCase_ , **UpperCamelCase_ )
def UpperCamelCase__ ( self: str ):
UpperCamelCase_ =self.question_encoder
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =self.generator
def UpperCamelCase__ ( self: str , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "longest" , UpperCamelCase_: str = None , UpperCamelCase_: bool = True , **UpperCamelCase_: Optional[Any] , ):
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , UpperCamelCase_ , )
if max_length is None:
UpperCamelCase_ =self.current_tokenizer.model_max_length
UpperCamelCase_ =self(
UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , max_length=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCamelCase_ =self.current_tokenizer.model_max_length
UpperCamelCase_ =self(
text_target=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase_ =labels["input_ids"]
return model_inputs
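# Note: this composite tokenizer (RagTokenizer upstream) proxies calls to whichever sub-tokenizer
# is currently active (the question-encoder tokenizer by default, the generator after the switch
# methods above) and persists both under separate subfolders.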
"""simple docstring"""
import string
def _UpperCamelCase ( A ):
UpperCamelCase_ =""
for i in sequence:
UpperCamelCase_ =ord(A )
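        # Atbash mirrors each letter: ord('A') + ord('Z') = 65 + 90 = 155, so an uppercase
        # code point c maps to 155 - c; lowercase uses 97 + 122 = 219 the same way.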
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def _UpperCamelCase ( A ):
UpperCamelCase_ =string.ascii_letters
UpperCamelCase_ =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
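    # map each letter to its mirror: look up its index in the normal alphabet, read the reversed one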
return "".join(
letters_reversed[letters.index(A )] if c in letters else c for c in sequence )
def _UpperCamelCase ( ):
from timeit import timeit
print("Running performance benchmarks..." )
UpperCamelCase_ ="from string import printable ; from __main__ import atbash, atbash_slow"
print(f"""> atbash_slow(): {timeit("atbash_slow(printable)" , setup=A )} seconds""" )
print(f"""> atbash(): {timeit("atbash(printable)" , setup=A )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
def __init__(self : Dict , A__ : str , A__ : int=3 , A__ : Dict=3_2 , A__ : str=3 , A__ : str=1_0 , A__ : Optional[int]=[1_0, 2_0, 3_0, 4_0] , A__ : Tuple=[1, 1, 2, 1] , A__ : int=True , A__ : List[Any]=True , A__ : List[Any]="relu" , A__ : Any=3 , A__ : Any=None , ) -> Tuple:
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = num_channels
lowercase = embeddings_size
lowercase = hidden_sizes
lowercase = depths
lowercase = is_training
lowercase = use_labels
lowercase = hidden_act
lowercase = num_labels
lowercase = scope
lowercase = len(A__ )
def UpperCAmelCase__ (self : str ) -> List[str]:
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ (self : Optional[Any] ) -> List[str]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase__ (self : Dict , A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict ) -> str:
lowercase = RegNetModel(config=A__ )
model.to(A__ )
model.eval()
lowercase = model(A__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCAmelCase__ (self : List[str] , A__ : List[str] , A__ : Union[str, Any] , A__ : str ) -> Dict:
lowercase = self.num_labels
lowercase = RegNetForImageClassification(A__ )
model.to(A__ )
model.eval()
lowercase = model(A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ (self : Any ) -> Union[str, Any]:
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase = config_and_inputs
lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
UpperCAmelCase : str = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
UpperCAmelCase : Dict = (
{'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase : Dict = False
UpperCAmelCase : int = False
UpperCAmelCase : Tuple = False
UpperCAmelCase : Tuple = False
def UpperCAmelCase__ (self : Optional[int] ) -> Tuple:
lowercase = RegNetModelTester(self )
lowercase = ConfigTester(self , config_class=A__ , has_text_modality=A__ )
def UpperCAmelCase__ (self : List[Any] ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ (self : Optional[Any] ) -> int:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def UpperCAmelCase__ (self : int ) -> Optional[int]:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def UpperCAmelCase__ (self : List[Any] ) -> Optional[Any]:
pass
def UpperCAmelCase__ (self : Any ) -> str:
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(A__ )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A__ )
def UpperCAmelCase__ (self : List[Any] ) -> Union[str, Any]:
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCAmelCase__ (self : List[str] ) -> Tuple:
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(config=A__ )
for name, module in model.named_modules():
if isinstance(A__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def UpperCAmelCase__ (self : Optional[Any] ) -> List[str]:
def check_hidden_states_output(A__ : Optional[Any] , A__ : List[str] , A__ : Tuple ):
lowercase = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(A__ , A__ ) )
lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase = self.model_tester.num_stages
self.assertEqual(len(A__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase = layer_type
lowercase = True
check_hidden_states_output(A__ , A__ , A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(A__ , A__ , A__ )
def UpperCAmelCase__ (self : List[Any] ) -> Tuple:
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def UpperCAmelCase__ (self : Tuple ) -> Any:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = RegNetModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
def UpperCAmelCase_ ( ):
"""simple docstring"""
lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ (self : Optional[int] ) -> Union[str, Any]:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ (self : List[str] ) -> int:
lowercase = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A__ )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=A__ , return_tensors="pt" ).to(A__ )
# forward pass
with torch.no_grad():
lowercase = model(**A__ )
# verify the logits
lowercase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A__ )
lowercase = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1e-4 ) )
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class UpperCAmelCase :
UpperCAmelCase : int
UpperCAmelCase : Node | None = None
UpperCAmelCase : Node | None = None
def UpperCAmelCase_ ( ):
"""simple docstring"""
lowercase = Node(1 )
lowercase = Node(2 )
lowercase = Node(3 )
lowercase = Node(4 )
lowercase = Node(5 )
return tree
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = []
if root is None:
return output
lowercase = deque([root] )
while process_queue:
lowercase = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = []
def populate_output(lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCAmelCase_ , lowerCAmelCase_ )
return output
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = []
def populate_output(lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCAmelCase_ , lowerCAmelCase_ )
return output
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
if root is None:
return []
lowercase = []
lowercase = 0
lowercase = height(lowerCAmelCase_ )
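    # alternate direction level by level: left-to-right when the flag is 0, right-to-left when it is 1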
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCAmelCase_ , lowerCAmelCase_ ) )
lowercase = 1
else:
output.append(get_nodes_from_right_to_left(lowerCAmelCase_ , lowerCAmelCase_ ) )
lowercase = 0
return output
def UpperCAmelCase_ ( ): # Main function for testing.
"""simple docstring"""
lowercase = make_tree()
print(f'In-order Traversal: {inorder(lowerCAmelCase_ )}' )
print(f'Pre-order Traversal: {preorder(lowerCAmelCase_ )}' )
print(f'Post-order Traversal: {postorder(lowerCAmelCase_ )}' , "\n" )
print(f'Height of Tree: {height(lowerCAmelCase_ )}' , "\n" )
print("Complete Level Order Traversal: " )
print(level_order(lowerCAmelCase_ ) , "\n" )
print("Level-wise order Traversal: " )
for level in range(1 , height(lowerCAmelCase_ ) + 1 ):
print(f'Level {level}:' , get_nodes_from_left_to_right(lowerCAmelCase_ , level=lowerCAmelCase_ ) )
print("\nZigZag order Traversal: " )
print(zigzag(lowerCAmelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
a : Optional[Any] = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
a : Optional[Any] = get_tests_dir('''fixtures/vocab.json''')
a : Optional[int] = get_tests_dir('''fixtures''')
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def A ( self : Dict ):
"""simple docstring"""
__snake_case = 0
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a_ , a_ )
def A ( self : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = WavaVecaConfig()
__snake_case = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a_ )
processor.save_pretrained(a_ )
__snake_case = AutoProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a_ , os.path.join(a_ , a_ ) )
copyfile(a_ , os.path.join(a_ , "vocab.json" ) )
__snake_case = AutoProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def A ( self : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = WavaVecaFeatureExtractor()
__snake_case = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
__snake_case = WavaVecaProcessor(a_ , a_ )
# save in new folder
processor.save_pretrained(a_ )
# drop `processor_class` in tokenizer
with open(os.path.join(a_ , a_ ) , "r" ) as f:
__snake_case = json.load(a_ )
config_dict.pop("processor_class" )
with open(os.path.join(a_ , a_ ) , "w" ) as f:
f.write(json.dumps(a_ ) )
__snake_case = AutoProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def A ( self : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = WavaVecaFeatureExtractor()
__snake_case = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
__snake_case = WavaVecaProcessor(a_ , a_ )
# save in new folder
processor.save_pretrained(a_ )
# drop `processor_class` in feature extractor
with open(os.path.join(a_ , a_ ) , "r" ) as f:
__snake_case = json.load(a_ )
config_dict.pop("processor_class" )
with open(os.path.join(a_ , a_ ) , "w" ) as f:
f.write(json.dumps(a_ ) )
__snake_case = AutoProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def A ( self : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a_ )
# copy relevant files
copyfile(a_ , os.path.join(a_ , "vocab.json" ) )
            # create empty sample processor
with open(os.path.join(a_ , a_ ) , "w" ) as f:
f.write("{}" )
__snake_case = AutoProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def A ( self : List[Any] ):
"""simple docstring"""
with self.assertRaises(a_ ):
__snake_case = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a_ ):
__snake_case = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a_ )
__snake_case = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
__snake_case = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
__snake_case = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
__snake_case = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a_ , use_fast=a_ )
__snake_case = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A ( self : List[str] ):
"""simple docstring"""
try:
AutoConfig.register("custom" , a_ )
AutoFeatureExtractor.register(a_ , a_ )
AutoTokenizer.register(a_ , slow_tokenizer_class=a_ )
AutoProcessor.register(a_ , a_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a_ ):
AutoProcessor.register(a_ , a_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case = CustomFeatureExtractor.from_pretrained(a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case = os.path.join(a_ , "vocab.txt" )
with open(a_ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__snake_case = CustomTokenizer(a_ )
__snake_case = CustomProcessor(a_ , a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a_ )
__snake_case = AutoProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A ( self : Union[str, Any] ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = False
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = False
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """AutoFeatureExtractor"""
__SCREAMING_SNAKE_CASE = """AutoTokenizer"""
__SCREAMING_SNAKE_CASE = False
try:
AutoConfig.register("custom" , a_ )
AutoFeatureExtractor.register(a_ , a_ )
AutoTokenizer.register(a_ , slow_tokenizer_class=a_ )
AutoProcessor.register(a_ , a_ )
# If remote code is not set, the default is to use local classes.
__snake_case = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
__snake_case = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a_ )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
__snake_case = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a_ )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A ( self : str ):
"""simple docstring"""
__snake_case = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def A ( cls : Optional[Any] ):
"""simple docstring"""
__snake_case = TOKEN
HfFolder.save_token(a_ )
@classmethod
def A ( cls : str ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = WavaVecaProcessor.from_pretrained(a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a_ , "test-processor" ) , push_to_hub=a_ , use_auth_token=self._token )
__snake_case = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a_ , getattr(new_processor.feature_extractor , a_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = WavaVecaProcessor.from_pretrained(a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a_ , "test-processor-org" ) , push_to_hub=a_ , use_auth_token=self._token , organization="valid_org" , )
__snake_case = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a_ , getattr(new_processor.feature_extractor , a_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A ( self : Any ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__snake_case = CustomFeatureExtractor.from_pretrained(a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case = os.path.join(a_ , "vocab.txt" )
with open(a_ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__snake_case = CustomTokenizer(a_ )
__snake_case = CustomProcessor(a_ , a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
__snake_case = Repository(a_ , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(a_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a_ , "tokenizer_config.json" ) ) as f:
__snake_case = json.load(a_ )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a_ , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a_ , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a_ , "custom_processing.py" ) ) )
repo.push_to_hub()
__snake_case = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=a_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
_a : Tuple = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
def snake_case__ ( UpperCAmelCase : float ):
assert type(UpperCAmelCase ) in (int, float) and decimal == int(UpperCAmelCase )
lowerCAmelCase__ :str = int(UpperCAmelCase )
lowerCAmelCase__ :Any = ""
lowerCAmelCase__ :Union[str, Any] = False
if decimal < 0:
lowerCAmelCase__ :Optional[Any] = True
decimal *= -1
while decimal > 0:
lowerCAmelCase__ ,lowerCAmelCase__ :Any = divmod(UpperCAmelCase , 1_6 )
lowerCAmelCase__ :Union[str, Any] = values[remainder] + hexadecimal
lowerCAmelCase__ :Tuple = "0x" + hexadecimal
if negative:
lowerCAmelCase__ :Optional[Any] = "-" + hexadecimal
return hexadecimal
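# Intended behavior: 255 -> "0xff" and -42 -> "-0x2a" (hex digits drawn from the table above)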
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from knapsack import knapsack as k
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =[0]
_SCREAMING_SNAKE_CASE =[0]
_SCREAMING_SNAKE_CASE =len(_a )
self.assertEqual(k.knapsack(_a , _a , _a , _a ) , 0 )
_SCREAMING_SNAKE_CASE =[60]
_SCREAMING_SNAKE_CASE =[10]
_SCREAMING_SNAKE_CASE =len(_a )
self.assertEqual(k.knapsack(_a , _a , _a , _a ) , 0 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =3
_SCREAMING_SNAKE_CASE =[1, 2, 3]
_SCREAMING_SNAKE_CASE =[3, 2, 1]
_SCREAMING_SNAKE_CASE =len(_a )
self.assertEqual(k.knapsack(_a , _a , _a , _a ) , 5 )
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =50
_SCREAMING_SNAKE_CASE =[60, 100, 120]
_SCREAMING_SNAKE_CASE =[10, 20, 30]
_SCREAMING_SNAKE_CASE =len(_a )
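        # optimal pick: the weight-20 and weight-30 items (values 100 + 120 = 220) exactly fill capacity 50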
self.assertEqual(k.knapsack(_a , _a , _a , _a ) , 220 )
if __name__ == "__main__":
unittest.main()
import string
from math import logaa
def lowerCamelCase( a__ ,a__):
_SCREAMING_SNAKE_CASE =document.translate(
str.maketrans('''''' ,'''''' ,string.punctuation)).replace('''\n''' ,'''''')
_SCREAMING_SNAKE_CASE =document_without_punctuation.split(''' ''') # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()])
def lowerCamelCase( a__ ,a__):
_SCREAMING_SNAKE_CASE =corpus.lower().translate(
str.maketrans('''''' ,'''''' ,string.punctuation)) # strip all punctuation and replace it with ''
_SCREAMING_SNAKE_CASE =corpus_without_punctuation.split('''\n''')
_SCREAMING_SNAKE_CASE =term.lower()
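    # returns (number of documents containing the term, total number of documents)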
return (len([doc for doc in docs if term in doc]), len(a__))
def lowerCamelCase( a__ ,a__ ,a__=False):
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''')
return round(1 + logaa(n / (1 + df)) ,3)
if df == 0:
raise ZeroDivisionError('''df must be > 0''')
elif n == 0:
raise ValueError('''log10(0) is undefined.''')
return round(logaa(n / df) ,3)
def lowerCamelCase( a__ ,a__):
return round(tf * idf ,3)
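# Worked example: a term appearing in df = 1 of n = 3 documents gives
# idf = round(log10(3 / 1), 3) = 0.477; with tf = 2 the tf-idf score is round(2 * 0.477, 3) = 0.954.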
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _A ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = XLMTokenizer
_snake_case : Any = False
def _snake_case ( self : Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__lowercase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
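        # toy BPE merge rules in the "left right frequency" format used by fairseq-style bpecodes;
        # the trailing empty string terminates the merges file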
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(lowerCamelCase ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(lowerCamelCase ) )
def _snake_case ( self : Dict , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = 'lower newer'
__lowercase = 'lower newer'
return input_text, output_text
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = XLMTokenizer(self.vocab_file , self.merges_file )
__lowercase = 'lower'
__lowercase = ['low', 'er</w>']
__lowercase = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__lowercase = tokens + ['<unk>']
__lowercase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
@slow
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
__lowercase = tokenizer.encode("sequence builders" , add_special_tokens=lowerCamelCase )
__lowercase = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCamelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
A = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
A = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
A = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def _lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) ,homepage='https://github.com/hendrycks/math' ,codebase_urls=['https://github.com/hendrycks/math'] ,)
def _lowerCamelCase ( self : int ,UpperCamelCase : int ,UpperCamelCase : Optional[int] ) -> Optional[Any]:
_lowercase : Optional[int] = 0.0
for i, j in zip(UpperCamelCase ,UpperCamelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(UpperCamelCase ,UpperCamelCase ) else 0.0
_lowercase : Any = n_correct / len(UpperCamelCase )
return {
"accuracy": accuracy,
}
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def lowerCAmelCase_ ( UpperCamelCase__ : List[Any] ):
"""simple docstring"""
__lowercase = {}
__lowercase = job["""started_at"""]
__lowercase = job["""completed_at"""]
__lowercase = date_parser.parse(UpperCamelCase__ )
__lowercase = date_parser.parse(UpperCamelCase__ )
__lowercase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
__lowercase = start
__lowercase = end
__lowercase = duration_in_min
return job_info
def lowerCAmelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int=None ):
"""simple docstring"""
__lowercase = None
if token is not None:
__lowercase = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
__lowercase = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__lowercase = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).json()
__lowercase = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(UpperCamelCase__ ) for job in result["""jobs"""]} )
__lowercase = math.ceil((result["""total_count"""] - 100) / 100 )
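        # per_page=100, so beyond the first page we still need ceil((total_count - 100) / 100) requests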
for i in range(UpperCamelCase__ ):
__lowercase = requests.get(url + f'''&page={i + 2}''' , headers=UpperCamelCase__ ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(UpperCamelCase__ ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
UpperCAmelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
UpperCAmelCase__ =parser.parse_args()
UpperCAmelCase__ =get_job_time(args.workflow_run_id)
UpperCAmelCase__ =dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
"""simple docstring"""
import inspect
import unittest
class lowerCamelCase__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
__lowercase = inspect.getmembers(A_ , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
__lowercase = """k-diffusion"""
elif backend == "invisible_watermark":
__lowercase = """invisible-watermark"""
assert backend in deps, F'''{backend} is not in the deps table!'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __magic_name__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowercase : int=2 , _lowercase : List[Any]=3 , _lowercase : Union[str, Any]=64 , _lowercase : List[str]=None ):
"""simple docstring"""
_UpperCamelCase: str = np.random.default_rng(_lowercase )
_UpperCamelCase: Union[str, Any] = length
_UpperCamelCase: Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
_UpperCamelCase: Union[str, Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
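        # synthetic linear data: y = a * x + b plus Gaussian noise with standard deviation 0.1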
def __len__( self : Tuple ):
"""simple docstring"""
return self.length
def __getitem__( self : Dict , _lowercase : Dict ):
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class __magic_name__ ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : int , _lowercase : Optional[int]=0 , _lowercase : List[str]=0 , _lowercase : Dict=False ):
"""simple docstring"""
super().__init__()
_UpperCamelCase: Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_UpperCamelCase: int = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_UpperCamelCase: Any = True
def lowerCAmelCase ( self : List[Any] , _lowercase : Dict=None ):
"""simple docstring"""
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
_UpperCamelCase: Tuple = False
return x * self.a[0] + self.b[0]
class __magic_name__ ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowercase : Dict=0 , _lowercase : List[Any]=0 , _lowercase : Tuple=False ):
"""simple docstring"""
super().__init__()
_UpperCamelCase: List[str] = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
_UpperCamelCase: Optional[Any] = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
_UpperCamelCase: Optional[Any] = True
def lowerCAmelCase ( self : Union[str, Any] , _lowercase : List[Any]=None ):
"""simple docstring"""
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
_UpperCamelCase: str = False
return x * self.a + self.b
def lowerCAmelCase_ ( lowercase: Dict , lowercase: int = 16 ) -> Dict:
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
_UpperCamelCase: Tuple = AutoTokenizer.from_pretrained('''bert-base-cased''' )
_UpperCamelCase: List[str] = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
_UpperCamelCase: List[Any] = load_dataset('''csv''' , data_files=lowercase )
_UpperCamelCase: str = datasets['''train'''].unique('''label''' )
_UpperCamelCase: int = {v: i for i, v in enumerate(lowercase )}
def tokenize_function(lowercase: List[str] ):
# max_length=None => use the model max length (it's actually the default)
_UpperCamelCase: int = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase , max_length=lowercase , padding='''max_length''' )
if "label" in examples:
_UpperCamelCase: Dict = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCamelCase: Dict = datasets.map(
lowercase , batched=lowercase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(lowercase: Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(lowercase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
_UpperCamelCase: List[Any] = DataLoader(tokenized_datasets['''train'''] , shuffle=lowercase , collate_fn=lowercase , batch_size=2 )
_UpperCamelCase: int = DataLoader(tokenized_datasets['''validation'''] , shuffle=lowercase , collate_fn=lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
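# _LazyModule defers the heavy torch/tf/flax imports until an attribute is first accessed,
# keeping `import` of this package cheap when a backend is unavailable.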
import os
from typing import Dict, List, Tuple, TypeVar, Union
snake_case = TypeVar("""T""")
snake_case = Union[List[T], Tuple[T, ...]]
snake_case = Union[T, List[T], Dict[str, T]]
snake_case = Union[str, bytes, os.PathLike]
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : str ):
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
SCREAMING_SNAKE_CASE : int = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
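        # sampler names with the "sample_" prefix map to k-diffusion's samplers (here, Euler)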
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : str = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : str = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : Optional[int] = output.images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Any = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase_ : str = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ : Optional[Any] = "ResNetConfig"
# Base docstring
UpperCAmelCase_ : str = "microsoft/resnet-50"
UpperCAmelCase_ : Dict = [1, 2048, 7, 7]
# Image classification docstring
UpperCAmelCase_ : Optional[int] = "microsoft/resnet-50"
UpperCAmelCase_ : Optional[int] = "tiger cat"
UpperCAmelCase_ : str = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class a ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 3 , lowerCamelCase_ = 1 , lowerCamelCase_ = "relu" ) -> int:
super().__init__()
_a : Union[str, Any] = nn.Convad(
lowerCamelCase_ , lowerCamelCase_ , kernel_size=lowerCamelCase_ , stride=lowerCamelCase_ , padding=kernel_size // 2 , bias=lowerCamelCase_ )
_a : List[str] = nn.BatchNormad(lowerCamelCase_ )
_a : Any = ACTaFN[activation] if activation is not None else nn.Identity()
def __UpperCamelCase ( self , lowerCamelCase_ ) -> int:
_a : List[str] = self.convolution(lowerCamelCase_ )
_a : Dict = self.normalization(lowerCamelCase_ )
_a : Union[str, Any] = self.activation(lowerCamelCase_ )
return hidden_state
class a ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> int:
super().__init__()
_a : str = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_a : Dict = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
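        # stem: a 7x7 stride-2 convolution followed by a 3x3 stride-2 max-pool, i.e. 4x downsampling overall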
_a : int = config.num_channels
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Optional[int]:
_a : Tuple = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_a : str = self.embedder(lowerCamelCase_ )
_a : Optional[int] = self.pooler(lowerCamelCase_ )
return embedding
class a ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 2 ) -> Union[str, Any]:
super().__init__()
_a : List[str] = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , stride=lowerCamelCase_ , bias=lowerCamelCase_ )
_a : str = nn.BatchNormad(lowerCamelCase_ )
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Optional[Any]:
_a : List[Any] = self.convolution(lowerCamelCase_ )
_a : Union[str, Any] = self.normalization(lowerCamelCase_ )
return hidden_state
class a ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 , lowerCamelCase_ = "relu" ) -> List[str]:
super().__init__()
_a : str = in_channels != out_channels or stride != 1
_a : Any = (
ResNetShortCut(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) if should_apply_shortcut else nn.Identity()
)
_a : Any = nn.Sequential(
ResNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) , ResNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , activation=lowerCamelCase_ ) , )
_a : int = ACTaFN[activation]
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Tuple:
_a : Optional[Any] = hidden_state
_a : Dict = self.layer(lowerCamelCase_ )
_a : Tuple = self.shortcut(lowerCamelCase_ )
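        # residual connection: add the (identity or 1x1-projected) shortcut back onto the main branch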
hidden_state += residual
_a : int = self.activation(lowerCamelCase_ )
return hidden_state
class a ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 , lowerCamelCase_ = "relu" , lowerCamelCase_ = 4 ) -> Dict:
super().__init__()
_a : Union[str, Any] = in_channels != out_channels or stride != 1
_a : Union[str, Any] = out_channels // reduction
_a : Union[str, Any] = (
ResNetShortCut(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) if should_apply_shortcut else nn.Identity()
)
_a : str = nn.Sequential(
ResNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) , ResNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ ) , )
_a : str = ACTaFN[activation]
def __UpperCamelCase ( self , lowerCamelCase_ ) -> List[str]:
_a : Dict = hidden_state
_a : List[Any] = self.layer(lowerCamelCase_ )
_a : Tuple = self.shortcut(lowerCamelCase_ )
hidden_state += residual
_a : Optional[Any] = self.activation(lowerCamelCase_ )
return hidden_state
class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
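
if __name__ == "__main__":
    # Usage sketch (added; not part of the original modeling file). Assumes the
    # public "microsoft/resnet-50" checkpoint is available; the random tensor
    # stands in for a preprocessed image.
    model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    dummy_pixel_values = torch.rand(1, 3, 224, 224)
    with torch.no_grad():
        out = model(pixel_values=dummy_pixel_values)
    print(out.logits.shape)  # torch.Size([1, 1000]) for the ImageNet-1k head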
| 120
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1,
        )
        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
    def test_batch_generation(self):
a_ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(UpperCAmelCase )
a_ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
a_ = """left"""
# Define PAD Token = EOS Token = 50256
a_ = tokenizer.eos_token
a_ = model.config.eos_token_id
# use different length sentences to test batching
a_ = [
"""Hello, my dog is a little""",
"""Today, I""",
]
a_ = tokenizer(UpperCAmelCase , return_tensors="""pt""" , padding=UpperCAmelCase )
a_ = inputs["""input_ids"""].to(UpperCAmelCase )
a_ = model.generate(
input_ids=UpperCAmelCase , attention_mask=inputs["""attention_mask"""].to(UpperCAmelCase ) , )
a_ = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(UpperCAmelCase )
a_ = model.generate(input_ids=UpperCAmelCase )
a_ = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
a_ = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(UpperCAmelCase )
a_ = model.generate(input_ids=UpperCAmelCase , max_length=model.config.max_length - num_paddings )
a_ = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
a_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase )
a_ = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase )
a_ = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , [non_padded_sentence, padded_sentence] )
@slow
    def test_model_from_pretrained(self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = BioGptModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
    def test_biogpt_sequence_classification_model(self):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = 3
a_ = input_dict["""input_ids"""]
a_ = input_ids.ne(1 ).to(UpperCAmelCase )
a_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a_ = BioGptForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_biogpt_sequence_classification_model_for_multi_label(self):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = 3
a_ = """multi_label_classification"""
a_ = input_dict["""input_ids"""]
a_ = input_ids.ne(1 ).to(UpperCAmelCase )
a_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
a_ = BioGptForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_biogpt(self):
a_ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
a_ = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
a_ = model(UpperCAmelCase )[0]
a_ = 4_23_84
a_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase )
a_ = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
@slow
    def test_biogpt_generation(self):
a_ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
a_ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(UpperCAmelCase )
torch.manual_seed(0 )
a_ = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(UpperCAmelCase )
a_ = model.generate(
**UpperCAmelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=UpperCAmelCase , )
a_ = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase )
a_ = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
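
if __name__ == "__main__":
    # Standalone sketch (added; not part of the original test file) of the
    # left-padded batched generation the slow test above exercises, using the
    # same "microsoft/biogpt" checkpoint. Requires torch to be installed.
    tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
    tokenizer.padding_side = "left"  # decoder-only models must pad on the left
    tokenizer.pad_token = tokenizer.eos_token
    model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
    model.config.pad_token_id = model.config.eos_token_id
    batch = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True)
    generated = model.generate(**batch, max_new_tokens=12)
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))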
| 263
| 0
|
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []
    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq works with a min priority queue, so -1 * len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
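    # Worked example (added for illustration): vertices 2 and 3 tie on degree and
    # vertex 2 wins the heap tie-break, so it is chosen first; the loop then
    # picks 0, 1 and 4. A fresh copy is used because the function mutates the
    # adjacency lists it is given.
    fresh_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    assert greedy_min_vertex_cover(fresh_graph) == {0, 1, 2, 4}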
| 467
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
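
if __name__ == "__main__":
    # Quick manual check (added sketch, not part of the original tests). The
    # exact answer span depends on the QA model backing the tool, but for the
    # passage above it should match the assertion used in the tests.
    tool = load_tool("text-question-answering")
    tool.setup()
    print(tool(TEXT, "What did Hugging Face do in April 2021?"))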
| 467
| 1
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def __UpperCAmelCase ( self : Any ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = True
for model_class in self.all_model_classes:
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = True
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = outputs.attentions
lowerCamelCase__ = len(self.model_tester.depths )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__ = True
lowerCamelCase__ = config.window_size**2
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
lowerCamelCase__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCamelCase__ = 2
self.assertEqual(out_len + added_hidden_states , len(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str ):
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = outputs.hidden_states
lowerCamelCase__ = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# Swinv2 has a different seq_length
lowerCamelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = reshaped_hidden_states[0].shape
lowerCamelCase__ = (
reshaped_hidden_states[0].view(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __UpperCAmelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCamelCase__ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : str ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = 3
lowerCamelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCamelCase__ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
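
if __name__ == "__main__":
    # Standalone sketch (added; not in the original file) of the integration
    # path above, decoding the predicted ImageNet label instead of checking a
    # logits slice. Requires torch, vision dependencies and network access.
    model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
    processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])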
| 129
|
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Return True if there is an augmenting path from source `s` to sink `t`."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the edges crossing the minimum cut once no augmenting path remains."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
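    # Added sketch: the same BFS/augmenting-path loop also yields the max-flow
    # value, which by the max-flow/min-cut theorem equals the cut capacity.
    # `test_graph` was mutated into its residual form above, so the original
    # capacities are rebuilt first.
    capacities = [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]
    parent = [-1] * len(capacities)
    max_flow = 0
    while bfs(capacities, 0, 5, parent):
        path_flow = float("Inf")
        s = 5
        while s != 0:
            path_flow = min(path_flow, capacities[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = 5
        while v != 0:
            u = parent[v]
            capacities[u][v] -= path_flow
            capacities[v][u] += path_flow
            v = parent[v]
    print(f"Maximum flow: {max_flow}")  # 23 for these capacities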
| 129
| 1
|
'''simple docstring'''
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
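    # Added example: float inputs work too; int(max - min) + 1 collapses to a
    # single bucket here, so the per-bucket sort does all the ordering.
    assert bucket_sort([0.4, 0.1, 0.9, 0.5]) == [0.1, 0.4, 0.5, 0.9]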
| 714
|
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
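
if __name__ == "__main__":
    # Added illustration of one concrete parametrization covered above; quote()
    # percent-encodes the blank in the filename.
    print(hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2"))
    # https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv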
| 575
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
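
if __name__ == "__main__":
    # Added sketch: build a default config and inspect the ONNX export spec
    # defined above; `atol_for_validation` is the class's own tolerance.
    config = DeiTConfig()
    onnx_config = DeiTOnnxConfig(config)
    print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
    print(onnx_config.atol_for_validation)  # 1e-04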
| 35
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
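# Illustrative usage of the preprocessing and post-processing defined above.
# This is a minimal sketch: the hub checkpoint name and the AutoModel class
# are assumptions, not taken from this file.
def _demo_dpt_semantic_segmentation():
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation

    image = Image.new("RGB", (640, 480))  # stand-in for a real image
    processor = AutoImageProcessor.from_pretrained("Intel/dpt-large-ade")  # assumed checkpoint
    model = AutoModelForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade")

    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # target_sizes resizes the logits back to the original (height, width)
    maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])
    return maps[0]  # a (480, 640) map of per-pixel class indices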
"""simple docstring"""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count the prize strings reachable from this state."""
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of valid prize strings of the given length."""
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
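# Cross-check of the memoised count against brute-force enumeration for small
# day counts (O = on time, L = late, A = absent), mirroring the rules encoded
# in _calculate above.
def _brute_force_check(max_days: int = 7) -> None:
    from itertools import product

    for days in range(1, max_days + 1):
        count = sum(
            1
            for s in product("OLA", repeat=days)
            if s.count("A") < 2 and "LLL" not in "".join(s)
        )
        assert count == _calculate(days, absent=0, late=0)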
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random channels-first numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
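# Illustrative end-to-end helper (not part of the test suite). The hub
# checkpoint name is an assumption, not taken from this file.
def _demo_clipseg():
    from PIL import Image
    from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")  # assumed checkpoint
    model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

    image = Image.new("RGB", (352, 352))  # stand-in for a real image
    inputs = processor(text=["a cat", "a dog"], images=[image] * 2, padding=True, return_tensors="pt")
    outputs = model(**inputs)
    return outputs.logits  # one low-resolution mask per (image, prompt) pair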
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seqaseq_no_dist(self):
        self.run_seqaseq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seqaseq_dp(self):
        self.run_seqaseq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seqaseq_ddp(self):
        self.run_seqaseq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seqaseq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")

        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
@slow
    def test_run_seqaseq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seqaseq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)

            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
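# Back-of-the-envelope check of the BNB memory comparison in the test above:
# per the comment there, ~25M of the model's parameters are quantisable, Adam
# keeps fp32 optimizer state (~8 bytes/param) while bnb's 8-bit optimizer
# needs ~2 bytes/param.
def _expected_optimizer_saving_mb(quantizable_params: int = 25_000_000) -> float:
    adamw_bytes, bnb_bytes = 8, 2
    # ~143MB, hence the conservative 120MB margin asserted in the test
    return quantizable_params * (adamw_bytes - bnb_bytes) / 2**20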
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def lowercase__ ( self : str ):
pass
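# Illustrative helper (not part of the test suite): the same WordPiece
# behaviour on a real checkpoint. The hub name is an assumption.
def _demo_layoutlm_wordpiece():
    from transformers import LayoutLMTokenizer

    tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")  # assumed checkpoint
    return tokenizer.tokenize("UNwant\u00E9d,running")  # e.g. ['un', '##want', '##ed', ...]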
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Fast CodeGen tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005, "
                "so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
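# Illustrative use of the truncation hook above: cut a completion at the first
# top-level comment. The hub checkpoint name is an assumption.
def _demo_truncate():
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    completion = "def add(a, b):\n    return a + b\n\n# example usage\nprint(add(1, 2))\n"
    ids = tok(completion).input_ids
    # everything from the first line matching "^#" onwards is dropped
    return tok.decode(ids, truncate_before_pattern=[r"^#"])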
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Check whether next_ver can extend the partial Hamiltonian path."""
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper: try to fill path[curr_ind:] with a valid cycle."""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle starting at start_index, or [] if none exists."""
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
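# Small demonstration on a 5-vertex graph that is known to contain a
# Hamiltonian cycle.
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # e.g. [0, 1, 2, 4, 3, 0]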
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Create a binarized/pruned checkpoint from a fine-pruned model."""
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
main(args)
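# Hypothetical invocation of this script (the file name and checkpoint path
# below are placeholders, not taken from this repository):
#
#   python bertarize.py \
#       --pruning_method sigmoied_threshold \
#       --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model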
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of arr, using the iterative form of Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
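# Sanity check against itertools.permutations (the orderings differ, so
# compare as sets).
def _check_heaps(items: list) -> bool:
    import itertools

    return set(heaps(list(items))) == set(itertools.permutations(items))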
from torch import nn
class ClassificationHead(nn.Module):
    """Linear classification head (as used e.g. for attribute models in PPLM)."""

    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
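# Minimal usage sketch: score a batch of sentence representations. The sizes
# here are illustrative, not taken from this file.
def _demo_classification_head():
    import torch

    head = ClassificationHead(class_size=5, embed_size=1024)
    hidden = torch.randn(2, 1024)  # e.g. averaged transformer hidden states
    return head(hidden).shape  # torch.Size([2, 5])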
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowercase : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
__lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Tuple , **__a : Tuple ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = """lower newer"""
__lowercase : int = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Dict = """lower newer"""
__lowercase : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase : str = tokenizer.tokenize(__a ) # , add_prefix_space=True)
self.assertListEqual(__a , __a )
__lowercase : int = tokens + [tokenizer.unk_token]
__lowercase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__lowercase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Tuple = """Encode this sequence."""
__lowercase : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
__lowercase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
__lowercase : List[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
__lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
__lowercase : List[str] = """Encode <mask> sequence"""
__lowercase : List[str] = """Encode <mask>sequence"""
__lowercase : Union[str, Any] = tokenizer.encode(__a )
__lowercase : Dict = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
__lowercase : int = tokenizer.encode(__a )
__lowercase : Union[str, Any] = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Optional[Any] = """A, <mask> AllenNLP sentence."""
__lowercase : Union[str, Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
__lowercase : Optional[Any] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : int = F"{text_of_1_token} {text_of_1_token}"
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : int = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
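# Illustrative helper (not part of the test suite): offset mappings on the
# hub checkpoint that the slow test above already uses.
def _demo_offset_mapping():
    from transformers import LongformerTokenizerFast

    tok = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
    enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    # with trim_offsets=True (the default) the leading space is excluded
    return enc.offset_mapping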
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__lowercase = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ )
__lowercase = [t[-1] for t in os.walk(os.path.join(lowerCAmelCase__ , os.listdir(lowerCAmelCase__ )[0] , '''snapshots''' ) )]
__lowercase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowerCAmelCase__ )
__lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 4
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(lowerCAmelCase__ )
__lowercase = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = shard(lowerCAmelCase__ )
__lowercase = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.151_4745) < 1E-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 4_9947.875) < 5E-1
__lowercase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCAmelCase__ ) == num_samples
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=lowerCAmelCase__ )
__lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 50
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(lowerCAmelCase__ )
__lowercase = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = shard(lowerCAmelCase__ )
__lowercase = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.0565_2401)) < 1E-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 238_3808.2)) < 5E-1
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , safety_checker=None )
__lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 50
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(lowerCAmelCase__ )
__lowercase = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = shard(lowerCAmelCase__ )
__lowercase = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.0400_3906)) < 1E-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 237_3516.75)) < 5E-1
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 )
__lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 50
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(lowerCAmelCase__ )
__lowercase = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = shard(lowerCAmelCase__ )
__lowercase = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.0400_3906)) < 1E-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 237_3516.75)) < 5E-1
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , )
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , scheduler=lowerCAmelCase__ , safety_checker=None , )
__lowercase = scheduler.create_state()
__lowercase = scheduler_state
__lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 50
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(lowerCAmelCase__ )
__lowercase = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = shard(lowerCAmelCase__ )
__lowercase = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.0_4504_3945)) < 1E-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 234_7693.5)) < 5E-1
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = jax.random.split(jax.random.PRNGKey(0 ) , lowerCAmelCase__ )
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , safety_checker=None , )
__lowercase = replicate(lowerCAmelCase__ )
__lowercase = pipeline.prepare_inputs(lowerCAmelCase__ )
__lowercase = shard(lowerCAmelCase__ )
__lowercase = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
__lowercase = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase__ , use_memory_efficient_attention=lowerCAmelCase__ , )
__lowercase = replicate(lowerCAmelCase__ )
__lowercase = pipeline.prepare_inputs(lowerCAmelCase__ )
__lowercase = shard(lowerCAmelCase__ )
__lowercase = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
__lowercase = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
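# --- Illustrative helper (not part of the original test file) ---
# A minimal sketch of the replicate/shard pattern the tests above rely on.
# It assumes `flax.jax_utils.replicate` and `flax.training.common_utils.shard`
# (the helpers these tests use); the function name is ours, not the library's.
#
#   def run_sharded(pipeline, params, prompt, num_inference_steps=50):
#       num_samples = jax.device_count()
#       prompt_ids = pipeline.prepare_inputs(num_samples * [prompt])
#       params = replicate(params)  # copy the weights to every device
#       rngs = jax.random.split(jax.random.PRNGKey(0), num_samples)
#       prompt_ids = shard(prompt_ids)  # add a leading device axis for pmap
#       return pipeline(prompt_ids, params, rngs, num_inference_steps, jit=True).images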
| 534
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = "audio-spectrogram-transformer"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1024 , num_mel_bins=128 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
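# --- Illustrative usage (not part of the original file) ---
# A minimal sketch, assuming this config is exposed as `ASTConfig` in
# `transformers` alongside an `ASTModel` class:
#
#   from transformers import ASTConfig, ASTModel
#
#   config = ASTConfig(num_mel_bins=128, max_length=1024)
#   model = ASTModel(config)  # randomly initialized AST encoder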
| 534
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase ):
    """simple docstring"""

    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item: item[0] )[0]
            expected_width = max(expected_values , key=lambda item: item[1] )[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""

    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_processor_tester = DetrImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_rescale" ) )
        self.assertTrue(hasattr(image_processing , "rescale_factor" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_pad" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
        self.assertEqual(image_processor.do_pad , True )

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , False )

    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )

        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )

        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )

        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
        encoding = image_processing(images=image , annotations=target , return_tensors="pt" )

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="pt" )

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
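# --- Illustrative usage (not part of the original test file) ---
# A minimal sketch of how the image processor exercised above is typically
# used outside the test suite. The checkpoint name comes from the tests; the
# image path and variable names are made-up examples.
#
#   from PIL import Image
#   from transformers import DetrImageProcessor
#
#   processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
#   image = Image.open("cats.png")  # hypothetical local image
#   encoding = processor(images=image, return_tensors="pt")
#   pixel_values = encoding["pixel_values"]  # (1, 3, H, W), resized and normalized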
| 209
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401


def _create_iam_role_for_sagemaker(role_name ):
    iam_client = boto3.client("iam" )
    sagemaker_trust_policy = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
def _get_iam_role_arn(role_name ):
    iam_client = boto3.client("iam" )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , int , )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
        aws_access_key_id = _ask_field("AWS Access Key ID: " )
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: " )
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , int , )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: " )
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
        _create_iam_role_for_sagemaker(iam_role_name )

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: " , lambda x: str(x ).lower() )

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda x: str(x ).lower() , )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda x: str(x ).lower() , )

    distributed_type = _ask_options(
        "What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?" , TORCH_DYNAMO_MODES , lambda x: TORCH_DYNAMO_MODES[int(x )] , default="default" , )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query , SAGEMAKER_PARALLEL_EC2_INSTANCES , lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x )] )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query , lambda x: str(x ).lower() , default="ml.p3.2xlarge" )

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want to use? [1]: " , int , default=1 , )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )

    return SageMakerConfig(
        image_uri=docker_image , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=distributed_type , use_cpu=False , dynamo_config=dynamo_config , ec2_instance_type=ec2_instance_type , profile=aws_profile , region=aws_region , iam_role_name=iam_role_name , mixed_precision=mixed_precision , num_machines=num_machines , sagemaker_inputs_file=sagemaker_inputs_file , sagemaker_metrics_file=sagemaker_metrics_file , )
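# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of how the helpers above fit together. The role name is a
# made-up example; get_sagemaker_input() itself prompts interactively on stdin.
#
#   _create_iam_role_for_sagemaker("my-accelerate-sagemaker-role")
#   arn = _get_iam_role_arn("my-accelerate-sagemaker-role")
#   config = get_sagemaker_input()  # interactive questionnaire -> SageMakerConfig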
| 209
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    """simple docstring"""

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = LayoutLMConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFLayoutLMModel(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFLayoutLMForMaskedLM(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFLayoutLMForQuestionAnswering(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp( self ):
        self.model_tester = TFLayoutLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip("Onnx compliancy broke with TF 2.10" )
    def test_onnx_compliancy( self ):
        pass
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
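    # NOTE: LayoutLM expects each bounding box as (x0, y0, x1, y1), normalized to a 0-1000 scale.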
# fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""

    @slow
    def test_forward_pass_no_head( self ):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids )

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-3 ) )

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , expected_slice , atol=1e-3 ) )
    @slow
    def test_forward_pass_sequence_classification( self ):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids , labels=tf.convert_to_tensor([1, 1] ) , )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape , expected_shape )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape , expected_shape )
    @slow
    def test_forward_pass_token_classification( self ):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids , labels=labels )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13) )
        self.assertEqual(logits.shape , expected_shape )
    @slow
    def test_forward_pass_question_answering( self ):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids )

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25) )
        self.assertEqual(outputs.start_logits.shape , expected_shape )
        self.assertEqual(outputs.end_logits.shape , expected_shape )
| 45
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds , labels ):
    """simple docstring"""
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )


@dataclass
class DataTrainingArguments:
    """simple docstring"""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_master():
            with open(output_eval_file , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info("  %s = %s" , key , value )
                    writer.write("%s = %s\n" % (key, value) )
            results.update(result )
return results
def _mp_fn(index ):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 268
| 0
|
"""simple docstring"""
def hexagonal_numbers(length: int ) -> list[int]:
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('''Length must be a positive integer.''')
    return [n * (2 * n - 1) for n in range(length )]
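# Example (first five hexagonal numbers, h(n) = n * (2n - 1) for n = 0..4):
#   hexagonal_numbers(5) -> [0, 1, 6, 15, 28]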
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 700
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig ):
    '''simple docstring'''

    model_type = '''detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size( self ) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ):
        return cls(backbone_config=backbone_config , **kwargs )

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig ):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1e-5

    @property
    def default_onnx_opset( self ) -> int:
        return 12
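# --- Illustrative usage (not part of the original file) ---
# A minimal sketch, assuming this config is exposed as `DetrConfig` in
# `transformers` alongside a `DetrModel` class:
#
#   from transformers import DetrConfig, DetrModel
#
#   config = DetrConfig(num_queries=50)  # fewer object queries than the default 100
#   model = DetrModel(config)            # randomly initialized DETR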
| 101
| 0
|
def nand_gate(input_1: int , input_2: int ) -> int:
    return int((input_1, input_2).count(0 ) != 0 )


def test_nand_gate() -> None:
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 149
|
from math import factorial
def solution(num: int = 100 ) -> int:
    return sum(map(int , str(factorial(num ) ) ) )
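# Worked example: factorial(10) = 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) -> 27.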
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 149
| 1
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R"\n        top_k (`int`, defaults to 5):\n            The number of predictions to return.\n        targets (`str` or `List[str]`, *optional*):\n            When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n            token will be used (with a warning, and that might be slower).\n\n    " , )
class FillMaskPipeline(Pipeline ):
    '''simple docstring'''
    def get_masked_index( self , input_ids ) -> np.ndarray:
        '''simple docstring'''
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError('''Unsupported framework''' )
        return masked_index
    def _ensure_exactly_one_mask_token( self , input_ids ) -> np.ndarray:
        '''simple docstring'''
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , F'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
    def ensure_exactly_one_mask_token( self , model_inputs ) -> None:
        '''simple docstring'''
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )
    def preprocess( self , inputs , return_tensors=None , **preprocess_parameters ) -> Dict[str, GenericTensor]:
        '''simple docstring'''
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs

    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        model_outputs['''input_ids'''] = model_inputs['''input_ids''']
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 , target_ids=None ):
        '''simple docstring'''
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['''input_ids'''][0]
        outputs = model_outputs['''logits''']

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )

            topk = tf.math.top_k(probs , k=top_k )
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k )

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=single_mask )
                proposition = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result
    def get_target_ids( self , targets , top_k=None ):
        '''simple docstring'''
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )['''input_ids''']
                if len(input_ids ) == 0:
                    logger.warning(
                        F'The specified target token `{target}` does not exist in the model vocabulary. '
                        '''We cannot replace it with anything meaningful, ignoring it''' )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    F'The specified target token `{target}` does not exist in the model vocabulary. '
                    F'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError('''At least one target must be provided when passed.''' )
        target_ids = np.array(target_ids )
        return target_ids
    def _sanitize_parameters( self , top_k=None , targets=None ):
        '''simple docstring'''
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params['''target_ids'''] = target_ids
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
        return {}, {}, postprocess_params

    def __call__( self , inputs , *args , **kwargs ):
        '''simple docstring'''
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(inputs ) == 1:
            return outputs[0]
        return outputs
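# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of driving this pipeline through the high-level factory;
# the checkpoint name is just a common example.
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=3)
#   # -> list of dicts with "score", "token", "token_str" and "sequence" keys,
#   #    as built in postprocess() above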
| 527
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 527
| 1
|
"""simple docstring"""
class Node:
    def __init__( self , data , previous=None , next_node=None ):
        """simple docstring"""
        self.data = data
        self.previous = previous
        self.next = next_node
def __str__( self ):
"""simple docstring"""
return F'''{self.data}'''
    def get_data( self ):
        """simple docstring"""
        return self.data

    def get_next( self ):
        """simple docstring"""
        return self.next

    def get_previous( self ):
        """simple docstring"""
        return self.previous
class LinkedListIterator:
    def __init__( self , head ):
        """simple docstring"""
        self.current = head
def __iter__( self ):
"""simple docstring"""
return self
    def __next__( self ):
        """simple docstring"""
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__( self ):
        """simple docstring"""
        self.head = None  # First node in list
        self.tail = None  # Last node in list
    def __str__( self ):
        """simple docstring"""
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )
    def __contains__( self , value ):
        """simple docstring"""
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False
def __iter__( self ):
"""simple docstring"""
return LinkedListIterator(self.head )
    def get_head_data( self ):
        """simple docstring"""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data( self ):
        """simple docstring"""
        if self.tail:
            return self.tail.get_data()
        return None
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.head is None:
snake_case : List[str] = node
snake_case : Optional[Any] = node
else:
self.insert_before_node(self.head , UpperCAmelCase__ )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.head is None:
self.set_head(UpperCAmelCase__ )
else:
self.insert_after_node(self.tail , UpperCAmelCase__ )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : Tuple = Node(UpperCAmelCase__ )
if self.head is None:
self.set_head(UpperCAmelCase__ )
else:
self.set_tail(UpperCAmelCase__ )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : List[str] = node
snake_case : Optional[Any] = node.previous
if node.get_previous() is None:
snake_case : str = node_to_insert
else:
snake_case : Optional[Any] = node_to_insert
snake_case : str = node_to_insert
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : List[str] = node
snake_case : Any = node.next
if node.get_next() is None:
snake_case : Any = node_to_insert
else:
snake_case : Tuple = node_to_insert
snake_case : Optional[int] = node_to_insert
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : List[Any] = 1
snake_case : Any = Node(UpperCAmelCase__ )
snake_case : str = self.head
while node:
if current_position == position:
self.insert_before_node(UpperCAmelCase__ , UpperCAmelCase__ )
return
current_position += 1
snake_case : int = node.next
self.insert_after_node(self.tail , UpperCAmelCase__ )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : str = self.head
while node:
if node.get_data() == item:
return node
snake_case : Union[str, Any] = node.get_next()
raise Exception("Node not found" )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if (node := self.get_node(UpperCAmelCase__ )) is not None:
if node == self.head:
snake_case : List[str] = self.head.get_next()
if node == self.tail:
snake_case : Dict = self.tail.get_previous()
self.remove_node_pointers(UpperCAmelCase__ )
@staticmethod
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if node.get_next():
snake_case : Optional[Any] = node.previous
if node.get_previous():
snake_case : Any = node.next
snake_case : Any = None
snake_case : Tuple = None
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.head is None
def UpperCamelCase__ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
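
# Minimal usage sketch for the processor under test (illustrative; the
# checkpoint name and image list are hypothetical, not part of the test file):
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   image_processor = ViTImageProcessor()
#   processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   batch = processor(text=["a photo of a cat"], images=images, return_tensors="np")
#   # batch keys: input_ids, token_type_ids, attention_mask, pixel_values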
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Build and simulate a quantum full adder for two bits plus a carry-in."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
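
# Illustrative classical cross-check (a sketch, not part of the original
# module): for definite inputs (0 or 1), the two measured qubits should
# concentrate on the binary encoding of input_1 + input_2 + carry_in.
def classical_full_adder(input_1: int, input_2: int, carry_in: int) -> str:
    """
    >>> classical_full_adder(1, 1, 1)
    '11'
    """
    return format(input_1 + input_2 + carry_in, "02b")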
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    # NOTE: the original names of the skipped tests below were lost in this copy;
    # the names here are reconstructed from the skip reasons and may differ from upstream.
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
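
# Minimal usage sketch (illustrative, not part of this module): build a config
# with the defaults above and inspect the ONNX input axes for the seq2seq task.
#
#   config = MarianConfig()
#   onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
#   print(list(onnx_config.inputs.keys()))
#   # ['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask']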
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
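
# Tiny demo (a sketch, not part of the original helpers): draw a horizontal
# rule, then write the word "menu" over it in ANSI color 32 (green).
if __name__ == "__main__":
    linebreak()
    reset_cursor()
    writeColor("menu", 32)
    forceWrite("\n")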
import math
def is_prime(number: int) -> bool:
    """Check if a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below the given ratio.
    """
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
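
# Illustrative note (an assumption based on Project Euler 58, which this
# solution mirrors): solution(0.1) returns the side length of the first square
# spiral whose diagonals are less than 10% prime — 26241 for the published problem.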
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in tensorflow cleanly."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # Adds a small epsilon to the logits so the op stays numerically well-behaved
    # while returning the same output as tf.nn.softmax.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF.
    # If end_dim or start_dim is negative, count them from the end.
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
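
# Quick sanity-check sketch for the helpers above (illustrative, not part of
# the module; kept as comments so importing this file stays side-effect free):
#
#   x = tf.reshape(tf.range(24, dtype=tf.float32), (2, 3, 4))
#   assert shape_list(flatten(x, start_dim=1)) == [2, 12]  # torch.flatten semantics
#   probs = stable_softmax(x, axis=-1)                     # same output as tf.nn.softmax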
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def mean_absolute_deviation(nums: list[int]) -> float:
    """
    >>> mean_absolute_deviation([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
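
# Illustrative worked example (a sketch, not part of the original module): the
# mean of [1, 2, 3, 4] is 2.5, the absolute deviations are [1.5, 0.5, 0.5, 1.5],
# and their mean is (1.5 + 0.5 + 0.5 + 1.5) / 4 == 1.0.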
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
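
# Illustrative invocation (a sketch; the script's actual filename is not given
# in this snippet, so `inference_bf16.py` below is a placeholder):
#
#   python inference_bf16.py --dpm --steps 20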
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"{solution() = }")
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCAmelCase_ : int = 250004
UpperCAmelCase_ : List[str] = 250020
@require_sentencepiece
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = MBartTokenizer
UpperCamelCase = MBartTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__magic_name__ : List[str] =MBartTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Any =MBartTokenizer(__snake_case , keep_accents=__snake_case )
__magic_name__ : int =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__snake_case , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__magic_name__ : Optional[int] =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__magic_name__ : Optional[Any] =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__magic_name__ : Any =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def A__ ( self :Tuple ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__magic_name__ : Tuple =(self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__magic_name__ : List[Any] =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__magic_name__ : List[str] =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__magic_name__ : Optional[Any] =tempfile.mkdtemp()
__magic_name__ : Dict =tokenizer_r.save_pretrained(__snake_case )
__magic_name__ : Dict =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__magic_name__ : int =tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__magic_name__ : Any =tokenizer_r.from_pretrained(__snake_case )
__magic_name__ : List[Any] =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
__magic_name__ : List[str] =tempfile.mkdtemp()
__magic_name__ : Optional[int] =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__magic_name__ : Dict =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__magic_name__ : Any =tokenizer_r.from_pretrained(__snake_case )
__magic_name__ : int =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
__magic_name__ : List[Any] =tempfile.mkdtemp()
__magic_name__ : Dict =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__magic_name__ : List[str] =tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__magic_name__ : str =tokenizer_r.from_pretrained(__snake_case )
__magic_name__ : Optional[int] =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
UpperCamelCase = """facebook/mbart-large-en-ro"""
UpperCamelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
UpperCamelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
UpperCamelCase = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def A__ ( cls :str ):
'''simple docstring'''
__magic_name__ : MBartTokenizer =MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__magic_name__ : Any =1
return cls
def A__ ( self :Any ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Any =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def A__ ( self :List[Any] ):
'''simple docstring'''
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
__magic_name__ : Union[str, Any] =[RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__magic_name__ : Optional[int] =self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
__magic_name__ : List[str] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : str =["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __snake_case )
__magic_name__ : Dict =10
__magic_name__ : Optional[Any] =self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __snake_case )
self.assertEqual(len(__snake_case ) , __snake_case )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_26, 25_00_01] )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[int] =tempfile.mkdtemp()
__magic_name__ : Dict =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
__magic_name__ : Dict =MBartTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Any =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__snake_case , return_tensors="""pt""" )
__magic_name__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__magic_name__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__magic_name__ : int =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Tuple =self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors="""pt""" )
__magic_name__ : Tuple =self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors="""pt""" )
__magic_name__ : List[Any] =targets["""input_ids"""]
__magic_name__ : List[str] =shift_tokens_right(__snake_case , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__snake_case ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 30_34, 2, 25_00_04]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} , )
def remove_digit(num: int) -> int:
    """
    Returns the biggest possible result that can be achieved by removing
    one digit from the given number.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    >>> remove_digit(-11)
    1
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for _ in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("""doctest""").testmod()
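
# Illustrative usage (a sketch): removing one digit from 152 can give 52, 12
# or 15, so remove_digit(152) returns 52 — the maximum of the three.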
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
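# In MAPPING above, "*" is a placeholder for the encoder layer index; the
# loading helpers below substitute the index recovered from each fairseq
# parameter name before walking the HF module tree.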
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk the dotted attribute path down to the target module or parameter
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
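# Example invocation of the converter above (all paths are placeholders, not
# files shipped with this script; the flag names match the parser defined here):
#   python convert_wav2vec2_conformer.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf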
| 710
|
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 144
| 0
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
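# Hypothetical interactive use of the pipeline exercised by these tests (the
# tiny random checkpoint produces meaningless text; it only checks plumbing):
#   from transformers import pipeline
#   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   print(generator("Something there", do_sample=False))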
| 108
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
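# A representative launch command for this script (values are illustrative,
# loosely mirroring the usual XNLI example; adjust paths and languages as needed):
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --output_dir /tmp/debug_xnli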
| 496
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # only build a default scheduler when none is passed in, so that
        # test_switch can hand over a scheduler recreated from another config
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
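# Sketch of how the scheduler under test is driven outside the test harness
# (kept as a comment so this module has no import-time side effects; shapes
# and step count are illustrative):
#   scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       residual = torch.randn_like(sample)  # stand-in for a denoiser forward pass
#       sample = scheduler.step(residual, t, sample).prev_sample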
| 711
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
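# batch_step_no_noise, exercised above, advances several (timestep, sample)
# pairs in a single call; a comment sketch of standalone use, with shapes
# purely illustrative (argument order matches the test above):
#   scheduler = DDIMParallelScheduler()
#   scheduler.set_timesteps(10)
#   flat_samples = samples.flatten(0, 1)   # (num_pairs * batch, C, H, W)
#   prev = scheduler.batch_step_no_noise(model_out, flat_timesteps, flat_samples, 0.0)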
| 214
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SEW model."""

    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
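# Quick check of the downsampling factor computed by the property above,
# assuming the default conv_stride (added for illustration): the stride
# product is 5 * 2**6 == 320, i.e. the feature encoder emits one frame per
# 320 raw audio samples.
#   SEWConfig().inputs_to_logits_ratio  # -> 320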
| 101
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = PerceiverTokenizer
UpperCamelCase_ : str = False
def lowercase ( self : Any ) -> str:
super().setUp()
__snake_case = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self : Tuple ) -> Dict:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def lowercase ( self : str , **A_ : Any ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A_ )
def lowercase ( self : Union[str, Any] , A_ : Tuple , A_ : List[str]=False , A_ : List[Any]=20 , A_ : Union[str, Any]=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case = []
for i in range(len(A_ ) ):
try:
__snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=A_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case = list(filter(lambda A_ : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , A_ ) )
__snake_case = list(filter(lambda A_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=A_ ) , A_ ) )
if max_length is not None and len(A_ ) > max_length:
__snake_case = toks[:max_length]
if min_length is not None and len(A_ ) < min_length and len(A_ ) > 0:
while len(A_ ) < min_length:
__snake_case = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case = [t[0] for t in toks]
# Ensure consistency
__snake_case = tokenizer.decode(A_ , clean_up_tokenization_spaces=A_ )
if " " not in output_txt and len(A_ ) > 1:
__snake_case = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=A_ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=A_ )
)
if with_prefix_space:
__snake_case = ''' ''' + output_txt
__snake_case = tokenizer.encode(A_ , add_special_tokens=A_ )
return output_txt, output_ids
def lowercase ( self : Optional[int] ) -> List[Any]:
__snake_case = self.perceiver_tokenizer
__snake_case = '''Unicode €.'''
__snake_case = tokenizer(A_ )
__snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['''input_ids'''] , A_ )
# decoding
__snake_case = tokenizer.decode(A_ )
self.assertEqual(A_ , '''[CLS]Unicode €.[SEP]''' )
__snake_case = tokenizer('''e è é ê ë''' )
__snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['''input_ids'''] , A_ )
# decoding
__snake_case = tokenizer.decode(A_ )
self.assertEqual(A_ , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def lowercase ( self : str ) -> int:
__snake_case = self.perceiver_tokenizer
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
__snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case = tokenizer(A_ , padding=A_ , return_tensors=A_ )
self.assertIsInstance(A_ , A_ )
if FRAMEWORK != "jax":
__snake_case = list(batch.input_ids.numpy()[0] )
else:
__snake_case = list(batch.input_ids.tolist()[0] )
self.assertListEqual(A_ , A_ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase ( self : Dict ) -> Union[str, Any]:
__snake_case = self.perceiver_tokenizer
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = tokenizer(A_ , padding=A_ , return_tensors=A_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , A_ )
self.assertIn('''attention_mask''' , A_ )
self.assertNotIn('''decoder_input_ids''' , A_ )
self.assertNotIn('''decoder_attention_mask''' , A_ )
def lowercase ( self : List[str] ) -> str:
__snake_case = self.perceiver_tokenizer
__snake_case = [
'''Summary of the text.''',
'''Another summary.''',
]
__snake_case = tokenizer(
text_target=A_ , max_length=32 , padding='''max_length''' , truncation=A_ , return_tensors=A_ )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
# safety check on max_len default value so we are sure the test works
__snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case = tempfile.mkdtemp()
__snake_case = ''' He is very happy, UNwant\u00E9d,running'''
__snake_case = tokenizer.encode(A_ , add_special_tokens=A_ )
tokenizer.save_pretrained(A_ )
__snake_case = tokenizer.__class__.from_pretrained(A_ )
__snake_case = after_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
shutil.rmtree(A_ )
__snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case = tempfile.mkdtemp()
__snake_case = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
__snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__snake_case = tokenizer.encode(A_ , add_special_tokens=A_ )
tokenizer.save_pretrained(A_ )
__snake_case = tokenizer.__class__.from_pretrained(A_ )
__snake_case = after_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case = tokenizer.__class__.from_pretrained(A_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(A_ )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized
    def test_pretokenized_inputs(self):
        pass

    # all ids are in the vocab by construction, so this round-trip check is unnecessary here
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity between two sets, lists, or tuples."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
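    # With alternative_union=True the denominator is |A| + |B| instead of |A ∪ B|:
    # here the intersection has 3 elements, so the score is 3 / 11 rather than 3 / 8.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))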
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
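# Example invocation via the Fire CLI (file names are illustrative):
#   python rouge_cli.py predicted_summaries.txt reference_summaries.txt --save_path rouge.json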
'''simple docstring'''
def solution(limit: int = 1000) -> int:
    """Return the sum of all natural numbers below `limit` that are multiples of 3 or 5."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f'''{solution() = }''')
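    # Known result for Project Euler problem 1: the multiples of 3 or 5 below 1000 sum to 233168.
    assert solution() == 233168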
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)
    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
    def __getitem__(self, index):
return (self.token_ids[index], self.lengths[index])
    def __len__(self):
return len(self.lengths )
    def check(self):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
if not self.params.is_master:
return
logger.info(F"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
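# Usage sketch (the `params` object is assumed to provide the attributes used above):
# `batch_sequences` is designed to be passed as a DataLoader collate_fn so that each
# batch is padded to its own maximum length, e.g.
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)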
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                'xnli', model_args.language, split='train', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        else:
            train_dataset = load_dataset(
                'xnli', model_args.train_language, split='train', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = train_dataset.features['label'].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            'xnli', model_args.language, split='validation', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = eval_dataset.features['label'].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            'xnli', model_args.language, split='test', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = predict_dataset.features['label'].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task='xnli', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples['premise'], examples['hypothesis'], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on train dataset', )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on validation dataset', )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc='prediction dataset map pre-processing'):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on prediction dataset', )
# Get the metric function
    metric = evaluate.load('xnli')

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
# Prediction
    if training_args.do_predict:
        logger.info('*** Predict ***')
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix='predict')

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics('predict', metrics)
        trainer.save_metrics('predict', metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, 'predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_predict_file, 'w') as writer:
                writer.write('index\tprediction\n')
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"""{index}\t{item}\n""")
if __name__ == "__main__":
main()
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f'Save vocab file to {pytorch_vocab_dump_path}')
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
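# Example invocation (paths are illustrative):
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./xlm_checkpoint.pth --pytorch_dump_folder_path ./dumped_xlm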
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def lowercase_ ( self ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def lowercase_ ( self ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def lowercase_ ( self ):
pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 608
| 0
|
def _print_dist(dist, v):
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()


def floyd_warshall(graph, v):
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))

    graph = [[float('inf') for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
    # # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
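    # A minimal non-interactive driver for floyd_warshall (our addition;
    # `build_graph` is an illustrative helper, not part of the original script):
    #
    #   def build_graph(v, edges):
    #       g = [[float('inf')] * v for _ in range(v)]
    #       for i in range(v):
    #           g[i][i] = 0.0
    #       for src, dst, weight in edges:
    #           g[src][dst] = weight
    #       return g
    #
    #   dist, _ = floyd_warshall(build_graph(3, [(1, 2, 2.0), (2, 1, 1.0)]), 3)
    #
    # This reproduces the 0/INF matrix shown in the expected output above.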
| 101
|
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines, direction):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
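

# A small usage sketch (our addition): compose the helpers above into a single
# redrawable status line. The ANSI color code 32 (green) is an illustrative
# choice, not something this module defines.
def demo_status_line():
    import time

    for pct in range(0, 101, 25):
        clear_line()  # overwrite the current line with spaces and return home
        writeColor(f"progress: {pct}%", 32)
        time.sleep(0.1)
    forceWrite("\n")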
| 266
| 0
|
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # return the 1-based (row, column) pair of the letter in the square
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
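

# Round-trip sanity check (our addition; "testmessage" is an arbitrary sample —
# note that encode() folds "j" into "i" and strips spaces first):
# cipher = BifidCipher()
# assert cipher.decode(cipher.encode("testmessage")) == "testmessage"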
| 83
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
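# What the _LazyModule indirection buys (our sketch): importing the package is
# cheap because the torch-backed classes listed in _import_structure are only
# resolved on first attribute access, e.g.:
# from transformers.models import jukebox
# model_cls = jukebox.JukeboxModel  # the heavy modeling import happens here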
| 83
| 1
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCAmelCase__ = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    f"""with: \"{text}\".""")
                self.new_user_input = text
            else:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""")
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"""{name} >> {text} \n"""
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS, R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ', )
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs')
        if conversation.new_user_input is None:
            raise ValueError(
                f"""Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. """
                'Add user inputs with the conversation\'s `add_user_input` method')
        if hasattr(self.tokenizer, '_build_conversation_input_ids'):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get('max_length', self.model.config.max_length)

        n = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""")
            trim = max_length - minimum_tokens
            model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
        conversation = model_inputs.pop('conversation')
        generate_kwargs['max_length'] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs['output_ids']
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
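

# A minimal usage sketch for the classes above (our addition; the DialoGPT
# checkpoint is an illustrative choice, not mandated by this file):
# from transformers import pipeline
# chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
# conversation = Conversation("What is the capital of France?")
# conversation = chatbot(conversation)
# print(conversation.generated_responses[-1])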
| 621
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
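

# Quick sanity check of the rounding above (our illustration, not in the
# original file): with the default scale_factor=8 the dimensions are rounded
# up to a multiple of 8**2 = 64, then divided by 8:
# assert get_new_h_w(768, 768) == (96, 96)   # 768 % 64 == 0
# assert get_new_h_w(769, 768) == (104, 96)  # 769 % 64 != 0, so h rounds up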
class KandinskyPipeline(DiffusionPipeline):
'''simple docstring'''
    def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNetaDConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', truncation=True, max_length=77, return_attention_mask=True, add_special_tokens=True, return_tensors='pt', )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''')

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask)

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !='''
                    f''' {type(prompt)}.''')
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    ''' the batch size of `prompt`.''')
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens, padding='max_length', max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt', )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1)
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])

            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device(f'''cuda:{gpu_id}''')

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')

        device = torch.device(f'''cuda:{gpu_id}''')

        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, prompt, image_embeds, negative_image_embeds, negative_prompt=None, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(prompt)}''')

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt)

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler, )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 614
| 0
|
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    # classic two-pointer scan; assumes `nums` is sorted in ascending order
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
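
# Note (our addition): the scan assumes `nums` is sorted in ascending order.
# For unsorted input, sort index/value pairs first and map the result back:
# order = sorted(range(len(nums)), key=nums.__getitem__)
# result = two_pointer([nums[k] for k in order], target)
# pair = sorted(order[k] for k in result)  # indices into the original list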
| 712
|
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_DESCRIPTION = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_KWARGS_DESCRIPTION = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    acc = simple_accuracy(preds, labels)
    fa = float(f1_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
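

# Worked example (our addition): two answer candidates for one question, both
# predicted correctly, score perfectly on every MultiRC metric:
# preds = [
#     {"idx": {"paragraph": 0, "question": 0, "answer": 0}, "prediction": 0},
#     {"idx": {"paragraph": 0, "question": 0, "answer": 1}, "prediction": 1},
# ]
# evaluate_multirc(preds, [0, 1])  # -> {"exact_match": 1.0, "f1_m": 1.0, "f1_a": 1.0}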
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
"""simple docstring"""
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64"""),
"query": datasets.Value("""int64"""),
},
"prediction_text": datasets.Value("""string"""),
},
"references": {
"idx": {
"passage": datasets.Value("""int64"""),
"query": datasets.Value("""int64"""),
},
"answers": datasets.Sequence(datasets.Value("""string""")),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64"""),
"paragraph": datasets.Value("""int64"""),
"question": datasets.Value("""int64"""),
},
"prediction": datasets.Value("""int64"""),
},
"references": datasets.Value("""int64"""),
}
else:
return {
"predictions": datasets.Value("""int64"""),
"references": datasets.Value("""int64"""),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
| 248
| 0
|
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class DualTransformeraDModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ] )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
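

# Sketch of how a pipeline drives the attributes above before calling forward
# (our addition; `model`, `text_tokens`, and `image_tokens` are hypothetical).
# The defaults assume the text stream contributes 77 tokens and the image
# stream 257, concatenated along the sequence dimension:
# model.mix_ratio = 0.5                          # blend weight between the two streams
# model.condition_lengths = [77, 257]            # per-condition token counts
# model.transformer_index_for_condition = [1, 0]
# out = model(hidden_states, encoder_hidden_states=torch.cat([text_tokens, image_tokens], dim=1))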
| 441
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 441
| 1
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowercase__ = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 700
|
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention", src_files, with_cuda=True, extra_include_paths=[str(root)], extra_cflags=["-DWITH_CUDA=1"], extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ], )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
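

# Usage sketch (our addition): the first call JIT-compiles the extension, so a
# CUDA toolchain must be available and the initial build can take several
# minutes; later calls hit torch's extension build cache.
# MSDA = load_cuda_kernels()
# MSDA.ms_deform_attn_forward(...)  # assumed entry point registered by vision.cpp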
| 420
| 0
|
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other queen in the same column, because if there
        # is it means there is a collision in the vertical. Then we apply the two
        # formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')

    print(len(boards), 'solutions were found.')
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
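    # Sanity check (our addition): the classic n-queens solution counts for
    # n = 1..5 are 1, 0, 0, 2, 10, so the call above should report 2 solutions.
    # boards = []
    # depth_first_search([], [], [], boards, 5)
    # assert len(boards) == 10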
| 90
|
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()

        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()])
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
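

# The accumulate-then-apply pattern exercised above, as a standalone sketch
# (our addition; `model`, `batches`, `loss_fn`, and `optimizer` are hypothetical):
# accumulator = GradientAccumulator()
# for batch in batches:
#     with tf.GradientTape() as tape:
#         loss = loss_fn(model, batch)
#     accumulator(tape.gradient(loss, model.trainable_variables))
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()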
| 588
| 0
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPTaTokenizer, GPTaTokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)
    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPTaTokenizer)
@require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPTaTokenizerFast)
    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
@require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)
@require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError, "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier", ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = TOKENIZER_MAPPING.values()
_UpperCamelCase : Union[str, Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase__ )
@require_tokenizers
def lowercase_ (self ):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=UpperCamelCase__ ) , UpperCamelCase__ )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , UpperCamelCase__ )
@require_tokenizers
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=UpperCamelCase__ )
_UpperCamelCase : Union[str, Any] = "Hello, world. How are you?"
_UpperCamelCase : List[Any] = tokenizer.tokenize(UpperCamelCase__ )
self.assertEqual("[UNK]" , tokens[0] )
_UpperCamelCase : int = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=UpperCamelCase__ )
_UpperCamelCase : Dict = tokenizer.tokenize(UpperCamelCase__ )
self.assertEqual("[UNK]" , tokens[0] )
@require_tokenizers
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
_UpperCamelCase : str = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[Any] = get_tokenizer_config("bert-base-cased" )
_UpperCamelCase : Dict = config.pop("_commit_hash" , UpperCamelCase__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase__ , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
_UpperCamelCase : int = get_tokenizer_config(UpperCamelCase__ )
self.assertDictEqual(UpperCamelCase__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
_UpperCamelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def lowercase_ (self ):
'''simple docstring'''
try:
AutoConfig.register("custom" , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
_UpperCamelCase : Tuple = CustomTokenizer.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ (self ):
'''simple docstring'''
try:
AutoConfig.register("custom" , UpperCamelCase__ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
            # We pass through a BertTokenizerFast because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json file.
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase : Union[str, Any] = BertTokenizerFast.from_pretrained(UpperCamelCase__ )
bert_tokenizer.save_pretrained(UpperCamelCase__ )
_UpperCamelCase : Optional[int] = CustomTokenizerFast.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ (self ):
'''simple docstring'''
with self.assertRaises(UpperCamelCase__ ):
_UpperCamelCase : Any = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ )
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def lowercase_ (self ):
'''simple docstring'''
class _SCREAMING_SNAKE_CASE ( _A ):
'''simple docstring'''
__UpperCAmelCase = False
class _SCREAMING_SNAKE_CASE ( _A ):
'''simple docstring'''
__UpperCAmelCase = NewTokenizer
__UpperCAmelCase = False
try:
AutoConfig.register("custom" , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
# If remote code is not set, the default is to use local
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
_UpperCamelCase : int = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
_UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
_UpperCamelCase : Any = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
_UpperCamelCase : int = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def lowercase_ (self ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , "bert-base is not a local folder and is not a valid model identifier" ):
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base" )
def lowercase_ (self ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_UpperCamelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase__ , revision="aaaaaa" )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
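# Usage sketch (separate from the test class above): the `tokenizer_type` escape hatch
# exercised by the fixture tests, assuming `vocab_dir` holds only a bare vocab file
# and no tokenizer_config.json.
def _example_tokenizer_type_loading(vocab_dir):
    slow_tok = AutoTokenizer.from_pretrained(vocab_dir, tokenizer_type="bert", use_fast=False)
    fast_tok = AutoTokenizer.from_pretrained(vocab_dir, tokenizer_type="bert")
    return slow_tok, fast_tok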
| 710
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCAmelCase = """data2vec-text"""
def __init__(self , lowerCAmelCase__=3_05_22 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Any = intermediate_size
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Dict = max_position_embeddings
_UpperCamelCase : List[Any] = type_vocab_size
_UpperCamelCase : List[Any] = initializer_range
_UpperCamelCase : Dict = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : List[Any] = use_cache
_UpperCamelCase : str = classifier_dropout
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@property
def lowercase_ (self ):
'''simple docstring'''
if self.task == "multiple-choice":
_UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
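# Usage sketch, assuming the two classes above carry their public transformers names,
# Data2VecTextConfig and Data2VecTextOnnxConfig (here both were mangled to the same name):
#   config = Data2VecTextConfig()
#   onnx_config = Data2VecTextOnnxConfig(config)
#   onnx_config.inputs  # OrderedDict mapping input names to their dynamic axes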
| 239
| 0
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
a_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def __lowercase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : str , lowerCamelCase : List[str] ):
for attribute in key.split('.' ):
UpperCamelCase_ : Optional[int] = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
UpperCamelCase_ : str = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
UpperCamelCase_ : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
UpperCamelCase_ : Union[str, Any] = value
elif weight_type == "weight_g":
UpperCamelCase_ : List[Any] = value
elif weight_type == "weight_v":
UpperCamelCase_ : Any = value
elif weight_type == "bias":
UpperCamelCase_ : int = value
else:
UpperCamelCase_ : Any = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowercase ( lowerCamelCase : Any , lowerCamelCase : List[Any] ):
UpperCamelCase_ : Union[str, Any] = []
UpperCamelCase_ : Optional[int] = fairseq_model.state_dict()
UpperCamelCase_ : str = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase_ : str = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase_ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase_ : Optional[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
UpperCamelCase_ : Optional[int] = True
if "*" in mapped_key:
UpperCamelCase_ : Any = name.split(lowerCamelCase )[0].split('.' )[-2]
UpperCamelCase_ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase )
if "weight_g" in name:
UpperCamelCase_ : Optional[Any] = 'weight_g'
elif "weight_v" in name:
UpperCamelCase_ : List[str] = 'weight_v'
elif "bias" in name:
UpperCamelCase_ : List[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase_ : Tuple = 'weight'
else:
UpperCamelCase_ : Optional[Any] = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowercase ( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] ):
UpperCamelCase_ : Optional[Any] = full_name.split('conv_layers.' )[-1]
UpperCamelCase_ : int = name.split('.' )
UpperCamelCase_ : str = int(items[0] )
UpperCamelCase_ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
UpperCamelCase_ : Union[str, Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
UpperCamelCase_ : List[str] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
UpperCamelCase_ : str = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
UpperCamelCase_ : List[str] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCamelCase )
@torch.no_grad()
def __lowercase ( lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=None , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : List[Any]=True ):
if config_path is not None:
UpperCamelCase_ : Tuple = UniSpeechSatConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_ : List[str] = UniSpeechSatConfig()
UpperCamelCase_ : Union[str, Any] = ''
if is_finetuned:
UpperCamelCase_ : Any = UniSpeechSatForCTC(lowerCamelCase )
else:
UpperCamelCase_ : str = UniSpeechSatForPreTraining(lowerCamelCase )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
UpperCamelCase_ : str = model[0].eval()
recursively_load_weights(lowerCamelCase , lowerCamelCase )
hf_wavavec.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
a_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
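# Example invocation (script name and paths are placeholders):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --config_path /path/to/config.json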
| 417
|
import math
import os
import sys
def read_file_binary( file_path: str ) -> str:
    # Read the whole file as bytes and return them as one long bit string.
    result = ''
    try:
        with open(file_path , 'rb' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print('File not accessible' )
        sys.exit()
def add_key_to_lexicon( lexicon: dict[str, str] , curr_string: str , index: int , last_match_id: str ) -> None:
    # Replace the matched key with its two one-bit extensions, widening every code
    # whenever the index crosses a power of two.
    lexicon.pop(curr_string )
    lexicon[curr_string + '0'] = last_match_id
    if math.log2(index ).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + '1'] = bin(index )[2:]
def compress_data( data_bits: str ) -> str:
    # Lempel-Ziv compression over a bit string: emit the lexicon code of each match
    # and grow the lexicon as we go.
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon , curr_string , index , last_match_id )
        index += 1
        curr_string = ''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length( source_path: str , compressed: str ) -> str:
    # Prefix the compressed stream with the source file's length, Elias-gamma coded.
    file_length = os.path.getsize(source_path )
    file_length_binary = bin(file_length )[2:]
    length_length = len(file_length_binary )
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary( file_path: str , to_write: str ) -> None:
    # Pack the bit string into bytes, terminating with a 1 followed by zero padding.
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()
def compress( source_path: str , destination_path: str ) -> None:
    data_bits = read_file_binary(source_path )
    compressed = compress_data(data_bits )
    compressed = add_file_length(source_path , compressed )
    write_file_binary(destination_path , compressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
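# Example invocation (script and file names are placeholders):
#   python lempel_ziv.py source.bin compressed.bin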
| 417
| 1
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(lowercase )
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Any , **lowerCamelCase__ :int ):
super().__init__(**lowerCamelCase__ )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , """vision""" )
self.check_model_type(lowerCamelCase__ )
def __call__( self :Optional[int] , lowerCamelCase__ :Union[str, "Image.Image", List[Dict[str, Any]]] , lowerCamelCase__ :Union[str, List[str]] = None , **lowerCamelCase__ :str , ):
if "text_queries" in kwargs:
UpperCamelCase__ :Dict = kwargs.pop("""text_queries""" )
if isinstance(lowerCamelCase__ , (str, Image.Image) ):
UpperCamelCase__ :Optional[Any] = {"""image""": image, """candidate_labels""": candidate_labels}
else:
UpperCamelCase__ :Dict = image
UpperCamelCase__ :Any = super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
return results
def __a ( self :str , **lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Tuple = {}
if "threshold" in kwargs:
UpperCamelCase__ :Dict = kwargs["""threshold"""]
if "top_k" in kwargs:
UpperCamelCase__ :List[str] = kwargs["""top_k"""]
return {}, {}, postprocess_params
def __a ( self :Dict , lowerCamelCase__ :Union[str, Any] ):
UpperCamelCase__ :Union[str, Any] = load_image(inputs["""image"""] )
UpperCamelCase__ :Any = inputs["""candidate_labels"""]
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = candidate_labels.split(""",""" )
UpperCamelCase__ :Tuple = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = self.tokenizer(lowerCamelCase__ , return_tensors=self.framework )
UpperCamelCase__ :Tuple = self.image_processor(lowerCamelCase__ , return_tensors=self.framework )
yield {
"is_last": i == len(lowerCamelCase__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __a ( self :List[str] , lowerCamelCase__ :Any ):
UpperCamelCase__ :Optional[Any] = model_inputs.pop("""target_size""" )
UpperCamelCase__ :int = model_inputs.pop("""candidate_label""" )
UpperCamelCase__ :Any = model_inputs.pop("""is_last""" )
UpperCamelCase__ :Optional[int] = self.model(**lowerCamelCase__ )
UpperCamelCase__ :List[str] = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def __a ( self :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple=0.1 , lowerCamelCase__ :List[Any]=None ):
UpperCamelCase__ :Optional[int] = []
for model_output in model_outputs:
UpperCamelCase__ :Tuple = model_output["""candidate_label"""]
UpperCamelCase__ :int = BaseModelOutput(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = self.image_processor.post_process_object_detection(
outputs=lowerCamelCase__ , threshold=lowerCamelCase__ , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
UpperCamelCase__ :Optional[Any] = outputs["""scores"""][index].item()
UpperCamelCase__ :int = self._get_bounding_box(outputs["""boxes"""][index][0] )
UpperCamelCase__ :List[str] = {"""score""": score, """label""": label, """box""": box}
results.append(lowerCamelCase__ )
UpperCamelCase__ :int = sorted(lowerCamelCase__ , key=lambda lowerCamelCase__ : x["score"] , reverse=lowerCamelCase__ )
if top_k:
UpperCamelCase__ :int = results[:top_k]
return results
def __a ( self :int , lowerCamelCase__ :"torch.Tensor" ):
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = box.int().tolist()
UpperCamelCase__ :List[Any] = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
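# Usage sketch via the top-level `pipeline` factory, the standard entry point for the
# class above; the checkpoint is the canonical OWL-ViT zero-shot detector.
def _example_zero_shot_detection():
    from transformers import pipeline
    detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
    return detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
        threshold=0.1,
    )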
| 383
|
def lucas_lehmer_test( p: int ) -> bool:
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
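# Lucas-Lehmer: for an odd prime p, the Mersenne number M_p = 2^p - 1 is prime exactly
# when s_(p-2) == 0 (mod M_p), where s_0 = 4 and s_(k+1) = s_k^2 - 2. For example,
# p = 7 gives M_7 = 127, which is prime, so lucas_lehmer_test(7) returns True.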
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 383
| 1
|
'''simple docstring'''
def count_divisors( n: int ) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
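# count_divisors uses the prime-factorisation identity: if n = p1^a1 * ... * pk^ak,
# then n has (a1 + 1) * ... * (ak + 1) divisors. E.g. 28 = 2^2 * 7 has 3 * 2 = 6
# divisors: 1, 2, 4, 7, 14, 28.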
def solution() -> int:
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 5_00:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 384
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def pressure_of_gas_system( moles: float , kelvin: float , volume: float ) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""" )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system( moles: float , kelvin: float , pressure: float ) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""" )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
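# Worked example (ideal-gas law PV = nRT): 1 mol at 273.15 K confined to 0.0224 m^3
# gives pressure_of_gas_system(1, 273.15, 0.0224) ~ 1.01e5 Pa, about one atmosphere.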
if __name__ == "__main__":
from doctest import testmod
testmod()
| 384
| 1
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None )-> List[str]:
'''simple docstring'''
super().__init__()
__UpperCamelCase = pad_token_id
__UpperCamelCase = max_length
__UpperCamelCase = vocab
__UpperCamelCase = merges
__UpperCamelCase = BytePairTokenizer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sequence_length=SCREAMING_SNAKE_CASE_ )
@classmethod
def A__ ( cls , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> List[Any]:
'''simple docstring'''
__UpperCamelCase = [''' '''.join(SCREAMING_SNAKE_CASE_ ) for m in tokenizer.bpe_ranks.keys()]
__UpperCamelCase = tokenizer.get_vocab()
return cls(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@classmethod
def A__ ( cls , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Any:
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return cls.from_tokenizer(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@classmethod
def A__ ( cls , SCREAMING_SNAKE_CASE_ )-> List[str]:
'''simple docstring'''
return cls(**SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> List[Any]:
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = self.tf_tokenizer(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = tf.ones_like(SCREAMING_SNAKE_CASE_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
__UpperCamelCase = max_length if max_length is not None else self.max_length
if max_length is not None:
__UpperCamelCase , __UpperCamelCase = pad_model_inputs(
SCREAMING_SNAKE_CASE_ , max_seq_length=SCREAMING_SNAKE_CASE_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
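# Usage sketch, assuming the layer above carries its public transformers name,
# TFGPT2Tokenizer, with un-mangled `from_pretrained` and `__call__` methods:
#   tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   batch = tf_tokenizer(tf.constant(["hello world"]))
#   batch["input_ids"], batch["attention_mask"]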
| 451
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=32 * 8 , SCREAMING_SNAKE_CASE_=32 * 8 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=64 , )-> Dict:
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = is_training
__UpperCamelCase = use_auxiliary_loss
__UpperCamelCase = num_queries
__UpperCamelCase = num_channels
__UpperCamelCase = min_size
__UpperCamelCase = max_size
__UpperCamelCase = num_labels
__UpperCamelCase = hidden_dim
__UpperCamelCase = hidden_dim
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE_ ) > 0.5
).float()
__UpperCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE_ ) > 0.5).long()
__UpperCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self )-> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__UpperCamelCase = self.num_queries
__UpperCamelCase = self.num_labels
__UpperCamelCase = [1, 1, 1, 1]
__UpperCamelCase = self.num_channels
__UpperCamelCase = 64
__UpperCamelCase = 128
__UpperCamelCase = self.hidden_dim
__UpperCamelCase = self.hidden_dim
__UpperCamelCase = self.hidden_dim
return config
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any:
'''simple docstring'''
__UpperCamelCase = output.encoder_hidden_states
__UpperCamelCase = output.pixel_decoder_hidden_states
__UpperCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) , config.decoder_layers )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False )-> Tuple:
'''simple docstring'''
with torch.no_grad():
__UpperCamelCase = MaskaFormerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(pixel_values=SCREAMING_SNAKE_CASE_ , pixel_mask=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> List[str]:
'''simple docstring'''
__UpperCamelCase = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
def comm_check_on_output(SCREAMING_SNAKE_CASE_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__UpperCamelCase = model(pixel_values=SCREAMING_SNAKE_CASE_ , pixel_mask=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(
pixel_values=SCREAMING_SNAKE_CASE_ , pixel_mask=SCREAMING_SNAKE_CASE_ , mask_labels=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_snake_case = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def A__ ( self )-> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = MaskaFormerModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self )-> int:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def A__ ( self )-> Any:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def A__ ( self )-> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def A__ ( self )-> List[str]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def A__ ( self )-> Dict:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A__ ( self )-> str:
'''simple docstring'''
pass
def A__ ( self )-> str:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
@slow
def A__ ( self )-> Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__UpperCamelCase = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = (self.model_tester.min_size,) * 2
__UpperCamelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE_ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE_ ),
'''class_labels''': torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE_ ).long(),
}
__UpperCamelCase = self.model_tester.get_config()
__UpperCamelCase = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.attentions is not None )
def A__ ( self )-> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase = self.all_model_classes[1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
__UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , mask_labels=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = self.all_model_classes[1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
model.train()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , mask_labels=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__UpperCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__UpperCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__UpperCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase__ : Any = 1e-4
def A_ ( ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self )-> List[Any]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A__ ( self )-> Any:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A__ ( self )-> List[str]:
'''simple docstring'''
__UpperCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_ , (1, 3, 384, 384) )
with torch.no_grad():
__UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
def A__ ( self )-> List[Any]:
'''simple docstring'''
__UpperCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_ , (1, 3, 384, 384) )
with torch.no_grad():
__UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ )
# masks_queries_logits
__UpperCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__UpperCamelCase = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
__UpperCamelCase = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
# class_queries_logits
__UpperCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__UpperCamelCase = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
def A__ ( self )-> str:
'''simple docstring'''
__UpperCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
__UpperCamelCase = inputs['''pixel_values'''].to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs['''mask_labels''']]
__UpperCamelCase = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs['''class_labels''']]
with torch.no_grad():
__UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
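# Post-processing sketch (public MaskaFormerImageProcessor API; the assertions above
# only check raw logits): map `outputs` back to per-instance masks at the image size.
#   results = image_processor.post_process_instance_segmentation(
#       outputs, target_sizes=[image.size[::-1]])
#   results[0]["segmentation"], results[0]["segments_info"]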
| 451
| 1
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __UpperCAmelCase ( __A , __A , __A , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionControlNetImgaImgPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
_lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
__a = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__a = CLIPTextModel(__A )
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__a = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def snake_case_ ( self , __A , __A=0 ):
if str(__A ).startswith("""mps""" ):
__a = torch.manual_seed(__A )
else:
__a = torch.Generator(device=__A ).manual_seed(__A )
__a = 2
__a = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , )
__a = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A )
__a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
__a = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def snake_case_ ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case_ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def snake_case_ ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __UpperCAmelCase ( __A , __A , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionControlNetImgaImgPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCamelCase = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__a = CLIPTextModel(__A )
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
__a = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
            randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }

        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image("https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))

        output = pipe(prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6)
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")

        assert np.abs(expected_image - image).max() < 9e-2
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self) -> None:
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image) -> None:
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self) -> None:
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self) -> None:
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights to our DeiT structure.
    """

    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCamelCase = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
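
# Illustrative invocation of this conversion script. The script file name below
# is an assumption for illustration; the checkpoint name is one of the timm DeiT
# variants that `--deit_name` expects, and the output path is arbitrary:
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224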
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
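
# Shape walk-through for torch_extract_patches, using assumed illustrative sizes:
# a (3, 32, 64) image tensor with 16x16 patches unfolds into 2 rows and 4 columns
# of patches, so the returned tensor has shape (1, 2, 4, 3 * 16 * 16), that is,
# (1, rows, columns, channels * patch_height * patch_width).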
def render_text(text, text_size=36, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None):
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(self, do_convert_rgb: bool = True, do_normalize: bool = True, patch_size: Dict[str, int] = None, max_patches: int = 2048, is_vqa: bool = False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(self, images: ImageInput, header_text: Optional[str] = None, do_convert_rgb: bool = None, do_normalize: Optional[bool] = None, max_patches: Optional[int] = None, patch_size: Optional[Dict[str, int]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors)

        return encoded_outputs
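
# A minimal usage sketch of the processor above (kept in a comment so it is not
# executed at import time; the synthetic image and the patch budget are
# illustrative assumptions):
#
#   import numpy as np
#   processor = Pix2StructImageProcessor(max_patches=1024)
#   image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
#   encoding = processor.preprocess(image, return_tensors="np")
#   encoding["flattened_patches"].shape  # (1, 1024, 2 + 16 * 16 * 3)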
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
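
# A minimal usage sketch (the column names below are made up for illustration):
#
#   task = QuestionAnsweringExtractive(question_column="q", context_column="ctx", answers_column="ans")
#   task.column_mapping  # {"q": "question", "ctx": "context", "ans": "answers"}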
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True)

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
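
    # For reference, the `rope_scaling` dict exercised above is the same config
    # field a user would set before building a model (values are illustrative):
    #
    #   config.rope_scaling = {"type": "linear", "factor": 10.0}
    #   model = OpenLlamaModel(config)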
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, sp_model_kwargs=None, legacy=True, **kwargs):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy=legacy, **kwargs)

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
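
# A minimal usage sketch of the tokenizer above (requires the sentencepiece
# dependency; "t5-small" is the public checkpoint referenced in the vocab map
# above and is used here only for illustration):
#
#   tokenizer = T5Tokenizer.from_pretrained("t5-small")
#   ids = tokenizer("translate English to German: Hello", return_tensors="pt").input_ids
#   tokenizer.get_sentinel_tokens()[:2]  # e.g. ['<extra_id_0>', '<extra_id_1>']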
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
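
# These functions are the entry points `torch.hub` resolves. A hedged usage
# sketch, assuming the documented public hub spec for this repository and an
# arbitrary checkpoint name:
#
#   import torch
#   tok = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")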
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al., 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc") -> dict:
    '''simple docstring'''
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}")
        logger.info(
            'Number of resulting singleton clusters in the key '
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}")
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            'files, respectively')
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span) -> dict:
    '''simple docstring'''
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}", )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({'conll_score': conll})
    return output_scores
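# Hedged illustration (assumed F1 values): the 'conll_score' computed above is just
# the arithmetic mean of the MUC, B-cubed and CEAFe F1 values scaled to [0, 100], e.g.
#   muc_f1, bcub_f1, ceafe_f1 = 0.80, 0.75, 0.70
#   (0.80 + 0.75 + 0.70) / 3 * 100 == 75.0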
def check_gold_parse_annotation(key_lines) -> bool:
    '''simple docstring'''
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#'):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
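# Hedged sketch of the gold-parse check above on a single CoNLL line (taken from the
# Examples docstring): column 6 (index 5) holds the parse bit, and "-" means no gold
# parse is available, in which case 'min_span' cannot be used.
#   line = "bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)"
#   line.split()[5] != "-"  # True: "(NP*)" is a real parse bit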
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string')),
                    'references': datasets.Sequence(datasets.Value('string')),
                }), codebase_urls=['https://github.com/ns-moosavi/coval'], reference_urls=[
                'https://github.com/ns-moosavi/coval',
                'https://www.aclweb.org/anthology/P16-1060',
                'http://www.conll.cemantix.org/2012/data.html',
            ], )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        all_metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=all_metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )
        return score
| 462
| 0
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    '''simple docstring'''

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None

class FuncNonContiguousArgs:
    '''simple docstring'''

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    '''simple docstring'''
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCAmelCase__ , 'tf' , 12 , **UpperCAmelCase__ )
@require_torch
@slow
def a__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCAmelCase__ , 'pt' , 12 , **UpperCAmelCase__ )
@require_torch
@slow
def a__ ( self ):
from transformers import BertModel
_A= ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(UpperCAmelCase__ ) )
vocab_file.flush()
_A= BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_A= BertModel(BertConfig(vocab_size=len(UpperCAmelCase__ ) ) )
model.save_pretrained(UpperCAmelCase__ )
self._test_export(UpperCAmelCase__ , 'pt' , 12 , UpperCAmelCase__ )
@require_tf
@slow
def a__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_A= self._test_export(UpperCAmelCase__ , 'tf' , 12 , **UpperCAmelCase__ )
_A= quantize(Path(UpperCAmelCase__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCAmelCase__ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_A= self._test_export(UpperCAmelCase__ , 'pt' , 12 , **UpperCAmelCase__ )
_A= quantize(UpperCAmelCase__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCAmelCase__ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
_A= Path(UpperCAmelCase__ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
return path
except Exception as e:
self.fail(UpperCAmelCase__ )
@require_torch
@require_tokenizers
@slow
def a__ ( self ):
from transformers import BertModel
_A= BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
_A= BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(UpperCAmelCase__ , UpperCAmelCase__ , 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ ( self ):
from transformers import TFBertModel
_A= TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
_A= BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(UpperCAmelCase__ , UpperCAmelCase__ , 'tf' )
def a__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_A= FeatureExtractionPipeline(UpperCAmelCase__ , UpperCAmelCase__ )
_A= ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
_A= infer_shapes(UpperCAmelCase__ , UpperCAmelCase__ )
# Assert all variables are present
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , UpperCAmelCase__ )
self.assertSequenceEqual(variable_names[3:] , UpperCAmelCase__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def a__ ( self ):
_A= ['''input_ids''', '''attention_mask''', '''token_type_ids''']
_A= {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
_A= ensure_valid_input(FuncContiguousArgs() , UpperCAmelCase__ , UpperCAmelCase__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(UpperCAmelCase__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(UpperCAmelCase__ ) , set(UpperCAmelCase__ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(UpperCAmelCase__ , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
_A= ensure_valid_input(FuncNonContiguousArgs() , UpperCAmelCase__ , UpperCAmelCase__ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(UpperCAmelCase__ ) , 1 )
self.assertEqual(len(UpperCAmelCase__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def a__ ( self ):
_A= generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
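# Hedged usage sketch of the exporter under test (the output path is hypothetical;
# convert() is imported at the top of this file and is called the same way in
# _test_export above, with the opset as the fourth positional argument):
#   from pathlib import Path
#   from transformers.convert_graph_to_onnx import convert
#   convert("pt", "bert-base-cased", Path("/tmp/bert/model.onnx"), 12)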
| 718
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        '''simple docstring'''
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger('transformers-cli/serving')

def serve_command_factory(args: Namespace):
    '''simple docstring'''
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)

class ServeModelInfoResult(BaseModel):
    infos: dict

class ServeTokenizeResult(BaseModel):
    tokens: List[str]
    tokens_ids: Optional[List[int]]

class ServeDeTokenizeResult(BaseModel):
    text: str

class ServeForwardResult(BaseModel):
    output: Any

class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.')
        serve_parser.add_argument(
            '--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on', )
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help='Model\'s name or path to stored model.')
        serve_parser.add_argument('--config', type=str, help='Model\'s config name or path to stored model.')
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument(
            '--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)', )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'Using serve command requires FastAPI and uvicorn. '
'Please install transformers with [serving]: pip install "transformers[serving]".'
'Or install FastAPI and uvicorn separately.' )
else:
logger.info(f"Serving model over {host}:{port}" )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '/', self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=['GET'], ),
                    APIRoute(
                        '/tokenize', self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=['POST'], ),
                    APIRoute(
                        '/detokenize', self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=['POST'], ),
                    APIRoute(
                        '/forward', self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=['POST'], ),
                ], timeout=600, )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    def detokenize(self, tokens_ids: List[int] = Body(None, embed=True), skip_special_tokens: bool = Body(False, embed=True), cleanup_tokenization_spaces: bool = Body(True, embed=True), ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model='', text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {'error': str(e)})
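# Hedged usage sketch (not part of the original file): starting the server with the
# flags registered above and querying the /tokenize route. The JSON field names
# mirror the Body(..., embed=True) parameter names of tokenize() above.
#
#   transformers-cli serve --task feature-extraction --model bert-base-cased --port 8888
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'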
| 476
| 0
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq
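# e.g. is_sq(49) -> True (7 * 7) and is_sq(50) -> False. Note that int(number**0.5)
# truncates a float square root, which is fine for the small values used here but
# can misfire for very large integers where float precision runs out.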
def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
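# Hedged worked example of add_three above: 1/2 + 1/3 + 1/6 gives
# top = 1*3*6 + 1*2*6 + 1*2*3 = 36 and bottom = 2*3*6 = 36, and dividing both by
# gcd(36, 36) = 36 reduces the sum to (1, 1), i.e. exactly 1.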
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 73
|
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
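# Hedged usage sketch of the two-end linear search above: the function compares the
# key against both ends of the list and recurses inward, so lookups near either end
# return quickly.
#   search([1, 5, 9, 12], 9) == 2
#   search([1, 5, 9, 12], 7) == -1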
| 181
| 0
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = '''<pad>'''
lowerCAmelCase__ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) ,__lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) ,__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''<s>''' )
self.assertEqual(vocab_keys[1] ,'''<pad>''' )
self.assertEqual(vocab_keys[-1] ,'''<mask>''' )
self.assertEqual(len(__lowerCamelCase ) ,10_02 )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size ,10_02 )
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
lowerCAmelCase__ : Dict = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowerCamelCase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
lowerCAmelCase__ : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
lowerCAmelCase__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase__ : List[str] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase ,**__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(__lowerCamelCase ,**__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = tempfile.mkdtemp()
lowerCAmelCase__ : Any = tokenizer_r.save_pretrained(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCAmelCase__ : Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__lowerCamelCase ,__lowerCamelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase__ : int = tokenizer_r.from_pretrained(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase ,__lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase__ : Any = tempfile.mkdtemp()
lowerCAmelCase__ : str = tokenizer_r.save_pretrained(__lowerCamelCase ,legacy_format=__lowerCamelCase )
lowerCAmelCase__ : Any = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase ,__lowerCamelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase__ : List[Any] = tokenizer_r.from_pretrained(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase ,__lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase__ : str = tempfile.mkdtemp()
lowerCAmelCase__ : Dict = tokenizer_r.save_pretrained(__lowerCamelCase ,legacy_format=__lowerCamelCase )
lowerCAmelCase__ : List[str] = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase__ : Tuple = tokenizer_r.from_pretrained(__lowerCamelCase )
lowerCAmelCase__ : Tuple = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase ,__lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
@cached_property
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCamelCase ,f.name )
lowerCAmelCase__ : Optional[int] = XLMRobertaTokenizer(f.name ,keep_accents=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = pickle.dumps(__lowerCamelCase )
pickle.loads(__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ : Optional[Any] = self.get_tokenizer()
lowerCAmelCase__ : List[str] = self.get_rust_tokenizer()
lowerCAmelCase__ : Tuple = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase__ : Tuple = tokenizer.tokenize(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : str = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Tuple = self.get_rust_tokenizer()
lowerCAmelCase__ : Union[str, Any] = tokenizer.encode(__lowerCamelCase )
lowerCAmelCase__ : Any = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
@slow
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = '''Hello World!'''
lowerCAmelCase__ : Optional[int] = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCamelCase ,self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowerCAmelCase__ : Optional[Any] = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCamelCase ,self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase ,model_name='''xlm-roberta-base''' ,revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' ,)
| 90
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
    'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'ctrl': 2_5_6,
}
CONTROL_CODES = {
'Pregnancy': 1_6_8_6_2_9,
'Christianity': 7_6_7_5,
'Explain': 1_0_6_4_2_3,
'Fitness': 6_3_4_4_0,
'Saving': 6_3_1_6_3,
'Ask': 2_7_1_7_1,
'Ass': 9_5_9_8_5,
'Joke': 1_6_3_5_0_9,
'Questions': 4_5_6_2_2,
'Thoughts': 4_9_6_0_5,
'Retail': 5_2_3_4_2,
'Feminism': 1_6_4_3_3_8,
'Writing': 1_1_9_9_2,
'Atheism': 1_9_2_2_6_3,
'Netflix': 4_8_6_1_6,
'Computing': 3_9_6_3_9,
'Opinion': 4_3_2_1_3,
'Alone': 4_4_9_6_7,
'Funny': 5_8_9_1_7,
'Gaming': 4_0_3_5_8,
'Human': 4_0_8_8,
'India': 1_3_3_1,
'Joker': 7_7_1_3_8,
'Diet': 3_6_2_0_6,
'Legal': 1_1_8_5_9,
'Norman': 4_9_3_9,
'Tip': 7_2_6_8_9,
'Weight': 5_2_3_4_3,
'Movies': 4_6_2_7_3,
'Running': 2_3_4_2_5,
'Science': 2_0_9_0,
'Horror': 3_7_7_9_3,
'Confession': 6_0_5_7_2,
'Finance': 1_2_2_5_0,
'Politics': 1_6_3_6_0,
'Scary': 1_9_1_9_8_5,
'Support': 1_2_6_5_4,
'Technologies': 3_2_5_1_6,
'Teenage': 6_6_1_6_0,
'Event': 3_2_7_6_9,
'Learned': 6_7_4_6_0,
'Notion': 1_8_2_7_7_0,
'Wikipedia': 3_7_5_8_3,
'Books': 6_6_6_5,
'Extract': 7_6_0_5_0,
'Confessions': 1_0_2_7_0_1,
'Conspiracy': 7_5_9_3_2,
'Links': 6_3_6_7_4,
'Narcissus': 1_5_0_4_2_5,
'Relationship': 5_4_7_6_6,
'Relationships': 1_3_4_7_9_6,
'Reviews': 4_1_6_7_1,
'News': 4_2_5_6,
'Translation': 2_6_8_2_0,
'multilingual': 1_2_8_4_0_6,
}
def get_pairs(word):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
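# e.g. get_pairs(("h", "e", "l", "l", "o</w>")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")} -- the set of adjacent
# symbol pairs that the BPE loop below ranks against self.bpe_ranks.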
class CTRLTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        """simple docstring"""
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.encoder)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
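    # Hedged illustration of the bpe() output convention above (the merge table is
    # assumed): sub-word pieces are joined with "@@ " and the trailing "</w>" marker
    # is stripped, e.g. "solution" -> "sol@@ ution", so _tokenize() below can split
    # on spaces and convert_tokens_to_string() can undo the "@@ " joints.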
    def _tokenize(self, text):
        """simple docstring"""
        split_tokens = []
        words = re.findall(R'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 90
| 1
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __snake_case :
snake_case__ : CommonSchedulerState
# setable values
snake_case__ : jnp.ndarray
snake_case__ : jnp.ndarray
snake_case__ : Optional[int] = None
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[str] , __lowerCAmelCase : CommonSchedulerState , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray ):
"""simple docstring"""
return cls(common=__lowerCAmelCase , init_noise_sigma=__lowerCAmelCase , timesteps=__lowerCAmelCase )
@dataclass
class __snake_case ( _lowercase):
snake_case__ : DDPMSchedulerState
class __snake_case ( _lowercase , _lowercase):
snake_case__ : Union[str, Any] = [e.name for e in FlaxKarrasDiffusionSchedulers]
snake_case__ : jnp.dtype
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return True
@register_to_config
def __init__( self : List[str] , __lowerCAmelCase : int = 1_0_0_0 , __lowerCAmelCase : float = 0.00_01 , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : str = "linear" , __lowerCAmelCase : Optional[jnp.ndarray] = None , __lowerCAmelCase : str = "fixed_small" , __lowerCAmelCase : bool = True , __lowerCAmelCase : str = "epsilon" , __lowerCAmelCase : jnp.dtype = jnp.floataa , ):
"""simple docstring"""
        self.dtype = dtype
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Optional[CommonSchedulerState] = None ):
"""simple docstring"""
if common is None:
_lowerCamelCase : List[Any] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_lowerCamelCase : Dict = jnp.array(1.0 , dtype=self.dtype )
_lowerCamelCase : Union[str, Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__lowerCAmelCase , init_noise_sigma=__lowerCAmelCase , timesteps=__lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : DDPMSchedulerState , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : Optional[int] = None ):
"""simple docstring"""
return sample
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : DDPMSchedulerState , __lowerCAmelCase : int , __lowerCAmelCase : Tuple = () ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
_lowerCamelCase : Any = (jnp.arange(0 , __lowerCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__lowerCAmelCase , timesteps=__lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : DDPMSchedulerState , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : int=None ):
"""simple docstring"""
_lowerCamelCase : str = state.common.alphas_cumprod[t]
_lowerCamelCase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_lowerCamelCase : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_lowerCamelCase : str = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_lowerCamelCase : Optional[Any] = jnp.clip(__lowerCAmelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_lowerCamelCase : Optional[Any] = jnp.log(jnp.clip(__lowerCAmelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
_lowerCamelCase : List[str] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_lowerCamelCase : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_lowerCamelCase : Dict = variance
_lowerCamelCase : Union[str, Any] = state.common.betas[t]
_lowerCamelCase : Union[str, Any] = (predicted_variance + 1) / 2
_lowerCamelCase : Optional[int] = frac * max_log + (1 - frac) * min_log
return variance
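    # Hedged note on the variance computation above: for "fixed_small" it is the
    # posterior variance of DDPM formula (7),
    #     beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t,
    # clipped to >= 1e-20 for numerical stability, while "fixed_large" simply
    # uses beta_t itself.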
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : DDPMSchedulerState , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : Optional[jax.random.KeyArray] = None , __lowerCAmelCase : bool = True , ):
"""simple docstring"""
_lowerCamelCase : str = timestep
if key is None:
_lowerCamelCase : List[Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_lowerCamelCase , _lowerCamelCase : Dict = jnp.split(__lowerCAmelCase , sample.shape[1] , axis=1 )
else:
_lowerCamelCase : Union[str, Any] = None
# 1. compute alphas, betas
_lowerCamelCase : str = state.common.alphas_cumprod[t]
_lowerCamelCase : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_lowerCamelCase : Optional[Any] = 1 - alpha_prod_t
_lowerCamelCase : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_lowerCamelCase : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_lowerCamelCase : Dict = model_output
elif self.config.prediction_type == "v_prediction":
_lowerCamelCase : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_lowerCamelCase : Tuple = jnp.clip(__lowerCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : Any = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_lowerCamelCase : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_lowerCamelCase : Dict = jax.random.split(__lowerCAmelCase , num=1 )
_lowerCamelCase : List[str] = jax.random.normal(__lowerCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__lowerCAmelCase , __lowerCAmelCase , predicted_variance=__lowerCAmelCase ) ** 0.5) * noise
_lowerCamelCase : List[str] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_lowerCamelCase : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__lowerCAmelCase , state=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : DDPMSchedulerState , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , ):
"""simple docstring"""
return add_noise_common(state.common , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : DDPMSchedulerState , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , ):
"""simple docstring"""
return get_velocity_common(state.common , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __len__( self : List[Any] ):
"""simple docstring"""
return self.config.num_train_timesteps
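# Hedged sampling-loop sketch for the scheduler above (the denoising model and all
# variable names are assumed; the method names follow the original diffusers source):
#   scheduler = FlaxDDPMScheduler()
#   state = scheduler.set_timesteps(scheduler.create_state(), num_inference_steps=50)
#   for t in state.timesteps:
#       eps = model(sample, t)  # predicted noise from an assumed model
#       sample, state = scheduler.step(state, eps, t, sample, key, return_dict=False)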
| 83
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """simple docstring"""
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
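# Hedged usage sketch of the processor above (checkpoint name and waveform assumed):
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000, text="a transcript")
# This yields the feature-extractor outputs with the tokenized ids attached as
# inputs["labels"], matching the encodings["input_ids"] assignment in __call__ above.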
| 473
| 0
|
"""simple docstring"""
def dfs(u, graph, visited_edge, path=None):
    '''simple docstring'''
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path

def check_circuit_or_path(graph, max_node):
    '''simple docstring'''
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node

def check_euler(graph, max_node):
    '''simple docstring'''
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('graph is not Eulerian')
        print('no path')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('graph has a Euler path')
    if check == 1:
        print('graph has a Euler cycle')
    path = dfs(start_node, graph, visited_edge)
    print(path)

def main():
    '''simple docstring'''
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
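# Hedged recap of the parity rule used by check_circuit_or_path above: a connected
# graph has an Euler cycle iff it has 0 odd-degree vertices and an Euler path iff
# it has exactly 2 (the path must start at one of them). For example, g1 above has
# exactly two odd-degree vertices (1 with degree 3 and 5 with degree 1), so it
# admits an Euler path but no Euler cycle.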
| 703
|
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'T5Config'

class TFMTaModel(TFTaModel):
    """simple docstring"""
    model_type = 'mt5'
    config_class = MTaConfig

class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    """simple docstring"""
    model_type = 'mt5'
    config_class = MTaConfig

class TFMTaEncoderModel(TFTaEncoderModel):
    """simple docstring"""
    model_type = 'mt5'
    config_class = MTaConfig
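# Hedged usage sketch (checkpoint name assumed): the subclasses above only swap in
# the mT5 config, so they load exactly like their T5 counterparts, e.g.
#   model = TFMTaModel.from_pretrained('google/mt5-small')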
| 135
| 0
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
SCREAMING_SNAKE_CASE : Union[str, Any] = 8
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=BITS ):
UpperCamelCase_ : str = x.device
UpperCamelCase_ : Optional[Any] = (x * 255).int().clamp(0 , 255 )
UpperCamelCase_ : int = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : Tuple = rearrange(_SCREAMING_SNAKE_CASE , """d -> d 1 1""" )
UpperCamelCase_ : Tuple = rearrange(_SCREAMING_SNAKE_CASE , """b c h w -> b c 1 h w""" )
UpperCamelCase_ : Union[str, Any] = ((x & mask) != 0).float()
UpperCamelCase_ : str = rearrange(_SCREAMING_SNAKE_CASE , """b c d h w -> b (c d) h w""" )
UpperCamelCase_ : Optional[Any] = bits * 2 - 1
return bits
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict=BITS ):
UpperCamelCase_ : str = x.device
UpperCamelCase_ : Optional[int] = (x > 0).int()
UpperCamelCase_ : Dict = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_SCREAMING_SNAKE_CASE , dtype=torch.intaa )
UpperCamelCase_ : List[str] = rearrange(_SCREAMING_SNAKE_CASE , """d -> d 1 1""" )
UpperCamelCase_ : List[str] = rearrange(_SCREAMING_SNAKE_CASE , """b (c d) h w -> b c d h w""" , d=8 )
UpperCamelCase_ : str = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
return (dec / 255).clamp(0.0 , 1.0 )
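# Hedged round-trip check of the two converters above (original names are assumed to
# be decimal_to_bits / bits_to_decimal): with BITS == 8, a channel value of 200/255
# maps to the bit pattern 11001000 (200 = 128 + 64 + 8), scaled to {-1, +1} by the
# first function, and the second function recovers 200/255 from that pattern.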
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
):
    """Predict the sample at the previous timestep by reversing the SDE, operating on bit-space latents."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of the DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to understand the notation used below.
    # Notation (<variable name> -> <name in paper>):
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t-1 without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
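# With eta = 0 the stochastic term above vanishes and the update reduces to the
# deterministic DDIM rule
#   x_{t-1} = sqrt(alpha_prod_t_prev) * pred_original_sample
#             + sqrt(1 - alpha_prod_t_prev) * model_output,
# i.e. formula (12) of https://arxiv.org/pdf/2010.02502.pdf with sigma_t = 0.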
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
):
    """Predict the sample at the previous timestep by reversing the SDE, operating on bit-space latents."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Route the scheduler's step through the bit-space step functions defined above.
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
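# Usage sketch (illustrative; there is no official pretrained bit-diffusion
# checkpoint, so the UNet path below is hypothetical — any compatible model
# trained on bit-space targets would do):
#
#   unet = UNet2DConditionModel.from_pretrained("path/to/bit-unet")  # hypothetical
#   pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]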
| 635
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]), )
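    # Worked example of the reduction above (illustrative numbers): with
    # image_size=64, kernel_size=3, stride=2, padding=1, one convolution maps
    # floor((64 + 2*1 - 3) / 2 + 1) = 32, so the four stacked convolutions give
    # 64 -> 32 -> 16 -> 8 -> 4 along each spatial axis.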
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A_ (self ) -> Dict:
pass
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0_448, -0.3_745, -1.8_317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 635
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
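# Minimal usage sketch (the column names below are hypothetical):
#
#   template = Summarization(text_column="article", summary_column="highlights")
#   template.column_mapping  # -> {"article": "text", "highlights": "summary"}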
| 228
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_with_args(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 228
| 1
|
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively search `list_data` for `key` from both ends; return its index or -1."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
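# Behaviour of the two-pointer recursion above (worked examples):
#   search([1, 3, 5, 7, 9], 7)  returns  3   (matched from the right end)
#   search([1, 3, 5, 7, 9], 4)  returns -1   (the pointers cross without a match)
# Both ends advance on every call, so the recursion depth grows as about half the list length.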
if __name__ == "__main__":
import doctest
doctest.testmod()
| 577
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
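# Usage note (assumes the shared helpers' standard behaviour): with these hooks,
# running e.g.
#   pytest tests/ --make-reports=my_run
# makes `pytest_terminal_summary_main` write per-run report files at the end of
# the test session.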
| 306
| 0
|
_lowercase : List[Any] =[4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowercase : List[str] =[3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowercase : Union[str, Any] ={
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name for a given date using Conway's Doomsday algorithm."""
    # minimal input validation
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # a year is a leap year iff divisible by 4 and, for century years, by 400
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = WEEK_DAY_NAMES[(dooms_day + day - day_anchor) % 7]
    return week_day
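# Worked example: get_week_day(2022, 1, 1).
#   century = 20, century_anchor = (5 * (20 % 4) + 2) % 7 = 2
#   centurian = 22, centurian_m = 10
#   dooms_day = (22 // 12 + 10 + 10 // 4 + 2) % 7 = 15 % 7 = 1
#   2022 is not a leap year, so day_anchor = DOOMSDAY_NOT_LEAP[0] = 3
#   (1 + 1 - 3) % 7 = 6 -> "Saturday", matching the calendar.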
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # attribute name reconstructed; the original flag name was obfuscated
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
        scheduler = DDIMScheduler()
        components = {'unet': unet, 'scheduler': scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 661
| 0
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
snake_case = datasets.logging.get_logger(__name__)
snake_case = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
snake_case = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
snake_case = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
        logger.info(
            "Number of resulting singleton clusters in the key "
            F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )

    if not keep_singletons:
        logger.info(
            F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
            "files, respectively" )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': f1})

        logger.info(
            name.ljust(10), F'''Recall: {recall * 100:.2f}''', F''' Precision: {precision * 100:.2f}''', F''' F1: {f1 * 100:.2f}''', )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(F'''CoNLL score: {conll:.2f}''')
        output_scores.update({"conll_score": conll})

    return output_scores
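# Note on the aggregate: `conll_score` is the mean of the MUC, B-cubed and CEAFe
# F1 values scaled to 0-100; e.g. F1 scores of 0.70, 0.60 and 0.50 give
# (0.70 + 0.60 + 0.50) / 3 * 100 = 60.0.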
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        all_metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use \'min_span\'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=all_metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )

        return score
| 62
|
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Convert each pixel of a BGR image to its negative."""
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
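# Note: the nested loop relies on NumPy broadcasting ([255, 255, 255] - pixel).
# An equivalent vectorised one-liner for a uint8 image would be:
#
#   img = 255 - img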
if __name__ == "__main__":
# read original image
A__ : Optional[Any] = imread('image_data/lena.jpg', 1)
# convert to its negative
A__ : Dict = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
| 353
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
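# Effect of the _LazyModule above (illustrative): importing the package is cheap,
# and the heavy framework modules only load on first attribute access, e.g.
#
#   from transformers.models.opt import OPTConfig  # loads configuration_opt only
#   from transformers.models.opt import OPTModel   # triggers the torch-backed modeling_opt import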
| 237
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes the layout of `param` to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param


def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a Megatron-LM GPT-2 state dict to the transformers GPT-2 layout."""
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get('args', None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if 'checkpoint_version' in input_state_dict.keys():
        checkpoint_version = input_state_dict['checkpoint_version']
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict['model']
    # The language model.
    lm = model['language_model']
    # The embeddings.
    embeddings = lm['embedding']

    # The word embeddings.
    word_embeddings = embeddings['word_embeddings']['weight']
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict['transformer.wte.weight'] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings['position_embeddings']['weight']
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict['transformer.wpe.weight'] = pos_embeddings

    # The transformer.
    transformer = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']

    # The regex to extract layer names.
    layer_re = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)')

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        'attention.dense': '.attn.c_proj.',
        'self_attention.dense': '.attn.c_proj.',
        'mlp.dense_h_to_4h': '.mlp.c_fc.',
        'mlp.dense_4h_to_h': '.mlp.c_proj.',
    }
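
    # A worked example of the rules above (illustrative): the Megatron key
    # 'layers.0.mlp.dense_h_to_4h.weight' maps to 'transformer.h.0.mlp.c_fc.weight',
    # and its weight is transposed below because Megatron stores Linear weights as
    # [out_features, in_features] while GPT-2's Conv1D modules expect the transpose.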
# Extract the layers.
for key, val in transformer.items():
# Match the name.
__A : Any = layer_re.match(_SCREAMING_SNAKE_CASE )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
__A : Optional[Any] = int(m.group(1 ) )
# The name of the operation.
__A : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
__A : List[Any] = m.group(3 )
# The name of the layer.
__A : int = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
__A : List[Any] = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
__A : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
__A : int = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A : Dict = causal_mask
# Insert a "dummy" tensor for masked_bias.
__A : str = torch.tensor(-1E4 , dtype=torch.floataa )
__A : List[str] = masked_bias
__A : Optional[Any] = fix_query_key_value_ordering(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
__A : List[Any] = out_val.transpose(0 , 1 ).contiguous()
# Store.
__A : Optional[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
__A : List[Any] = fix_query_key_value_ordering(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Store. No change of shape.
__A : List[str] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
__A : str = megatron_to_transformers[op_name]
__A : List[Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
__A : List[Any] = megatron_to_transformers[op_name]
__A : List[str] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
__A : Tuple = transformer['final_layernorm.weight']
__A : List[Any] = transformer['final_layernorm.bias']
# For LM head, transformers' wants the matrix to weight embeddings.
__A : Any = word_embeddings
# It should be done!
return output_state_dict
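

# Once main() below has written config.json, the tokenizer files and pytorch_model.bin
# into the checkpoint's directory, the converted model can be loaded with vanilla
# transformers. A minimal sketch (the directory path is a placeholder):
#
#   from transformers import GPT2LMHeadModel
#   model = GPT2LMHeadModel.from_pretrained('/path/to/checkpoint_dir')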


def main():
    """Entry point: load a Megatron-LM GPT-2 checkpoint, convert it and save it in transformers format."""
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument('--print-checkpoint-structure', action='store_true')
    parser.add_argument(
        'path_to_checkpoint', type=str, help='Path to the checkpoint file (.zip archive or direct .pt file)'
    )
    parser.add_argument(
        '--config_file', default='', type=str, help='An optional config json file describing the pre-trained model.'
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f'Extracting PyTorch state dictionary from {args.path_to_checkpoint}')
    if args.path_to_checkpoint.endswith('.zip'):
        with zipfile.ZipFile(args.path_to_checkpoint, 'r') as checkpoint:
            with checkpoint.open('release/mp_rank_00/model_optim_rng.pt') as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location='cpu')
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location='cpu')

    ds_args = input_state_dict.get('args', None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == '':
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = 'gelu_fast'
            elif ds_args.openai_gelu:
                activation_function = 'gelu_new'
            else:
                activation_function = 'gelu'
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = 'gelu_new'

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type='cls_index',
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ['GPT2LMHeadModel']

    # Convert.
    print('Converting')
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == 'GPT2BPETokenizer':
            tokenizer_model_name = 'gpt2'
        elif tokenizer_type == 'PretrainedFromHF':
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f'Unrecognized tokenizer_type {tokenizer_type}')
    else:
        tokenizer_model_name = 'gpt2'

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print('Saving config')
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f'Adding {tokenizer_class} tokenizer files')
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, 'pytorch_model.bin')
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)

####################################################################################################
if __name__ == "__main__":
    main()
####################################################################################################