| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
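# A minimal usage sketch (the 2x2 key below is an illustrative choice; any integer
# key matrix whose determinant is coprime with 36 passes check_determinant):
#
#   cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   ciphertext = cipher.encrypt("testing hill cipher")  # padded to a multiple of break_key
#   assert cipher.decrypt(ciphertext).startswith("TESTINGHILLCIPHER")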
| 681 |
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1000000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
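# Worked example: 585 is a decimal palindrome and bin(585) == "0b1001001001",
# whose digit part "1001001001" is also a palindrome, so 585 contributes to the sum.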
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 681 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
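# The assorted import styles above (plain module, aliased module, attribute,
# aliased attribute, nested attribute) plus this aliased builtin exist so
# test_patching.py can verify that patch_submodule() intercepts every lookup
# path a module might use.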
| 681 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
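# For intuition: timm stores attention as one fused projection of shape
# (3 * hidden_size, hidden_size); the three consecutive row blocks sliced above
# are, in order, the query, key and value matrices that the HuggingFace ViT
# implementation keeps as separate Linear layers.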
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
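# Example invocation (the script name and output path are illustrative assumptions):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small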
| 681 | 1 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 681 |
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
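# Worked example: for num = 121 the loop builds rev_num = 1, then 12, then 121,
# which equals the saved copy, so 121 is a palindrome; 123 reverses to 321 != 123.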
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
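# A minimal usage sketch (illustrative): BertGenerationConfig() with no arguments
# yields the 24-layer / 1024-hidden defaults above; any value can be overridden by
# keyword, e.g. BertGenerationConfig(hidden_size=512, num_hidden_layers=6).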
| 681 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
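# With the _LazyModule indirection above, importing this package does not pull in
# torch; the modeling classes are only materialized on first attribute access,
# while the TYPE_CHECKING branch keeps static type checkers and IDEs working.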
| 681 | 1 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = " "
        product_mrp = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
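# Note: the CSS selectors above ("s-result-item", "a-offscreen", "a-icon-alt", ...)
# simply mirror the Amazon search-page markup at the time of writing; scraping like
# this breaks whenever that markup changes, so treat the selectors as illustrative
# rather than a stable API.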
| 681 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
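# A minimal usage sketch (the model name and placeholder string are illustrative
# assumptions):
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   # "<cat-toy>" now expands to "<cat-toy>_0 ... <cat-toy>_3" before encoding:
#   ids = tokenizer("a photo of <cat-toy>")["input_ids"]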
| 681 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 681 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 681 | 1 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
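# For intuition (illustrative sizes): with checkpoint_version >= 2.0, num_heads=2,
# num_splits=3 and hidden_size=4, a fused QKV tensor of shape (2*3*4, D) is viewed
# as (2, 3, 4, D), the head and split axes are swapped to (3, 2, 4, D), and it is
# flattened back to (num_splits * num_heads * hidden_size, D) -- the layout the
# rest of this script expects.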
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# The converted output model.
lowerCamelCase : Optional[int] = {}
# old versions did not store training args
lowerCamelCase : Dict = input_state_dict.get("""args""", lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCamelCase : Any = ds_args.padded_vocab_size
lowerCamelCase : Optional[Any] = ds_args.max_position_embeddings
lowerCamelCase : Any = ds_args.hidden_size
lowerCamelCase : Tuple = ds_args.num_layers
lowerCamelCase : Optional[int] = ds_args.num_attention_heads
lowerCamelCase : List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCamelCase : Optional[Any] = config.n_head
# The hidden_size per head.
lowerCamelCase : Optional[int] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCamelCase : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
lowerCamelCase : Any = 0.0
# The model.
lowerCamelCase : Union[str, Any] = input_state_dict["""model"""]
# The language model.
lowerCamelCase : List[Any] = model["""language_model"""]
# The embeddings.
lowerCamelCase : Any = lm["""embedding"""]
# The word embeddings.
lowerCamelCase : List[Any] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
lowerCamelCase : Optional[Any] = word_embeddings[: config.vocab_size, :]
lowerCamelCase : Optional[Any] = word_embeddings
# The position embeddings.
lowerCamelCase : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCamelCase : int = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCamelCase : List[Any] = pos_embeddings
# The transformer.
lowerCamelCase : List[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
lowerCamelCase : int = re.compile(R"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
lowerCamelCase : List[Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
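# For illustration, the regex above splits a hypothetical Megatron key such as
# "layers.0.attention.dense.weight" into groups ("0", "attention.dense", "weight");
# the loop below then maps it via megatron_to_transformers onto
# "transformer.h.0.attn.c_proj.weight" in the output state dict.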
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCamelCase : Union[str, Any] = layer_re.match(lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCamelCase : List[Any] = int(m.group(1 ) )
# The name of the operation.
lowerCamelCase : Dict = m.group(2 )
# Is it a weight or a bias?
lowerCamelCase : Any = m.group(3 )
# The name of the layer.
lowerCamelCase : str = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
lowerCamelCase : int = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
lowerCamelCase : Dict = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert the 1x1xDxD lower-triangular causal mask (stored under the "bias" key).
lowerCamelCase : List[str] = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.floataa ) ).view(
1, 1, lowerCamelCase, lowerCamelCase )
lowerCamelCase : str = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCamelCase : List[Any] = torch.tensor(-1e4, dtype=torch.floataa )
lowerCamelCase : int = masked_bias
lowerCamelCase : Union[str, Any] = fix_query_key_value_ordering(lowerCamelCase, lowerCamelCase, 3, lowerCamelCase, lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCamelCase : Optional[Any] = out_val.transpose(0, 1 ).contiguous()
# Store.
lowerCamelCase : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCamelCase : Optional[int] = fix_query_key_value_ordering(lowerCamelCase, lowerCamelCase, 3, lowerCamelCase, lowerCamelCase )
# Store. No change of shape.
lowerCamelCase : Union[str, Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCamelCase : int = megatron_to_transformers[op_name]
lowerCamelCase : List[str] = val.transpose(0, 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCamelCase : Union[str, Any] = megatron_to_transformers[op_name]
lowerCamelCase : str = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCamelCase : Optional[Any] = transformer["""final_layernorm.weight"""]
lowerCamelCase : Tuple = transformer["""final_layernorm.bias"""]
# For the LM head, transformers ties the output weight matrix to the word embeddings.
lowerCamelCase : str = word_embeddings
# It should be done!
return output_state_dict
def _a ( ):
# Create the argument parser.
lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""", action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""", type=lowerCamelCase, help="""Path to the checkpoint file (.zip archive or direct .pt file)""", )
parser.add_argument(
"""--config_file""", default="""""", type=lowerCamelCase, help="""An optional config json file describing the pre-trained model.""", )
lowerCamelCase : Dict = parser.parse_args()
# Extract the basename.
lowerCamelCase : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip suffix is optional; keep handling it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint, """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
lowerCamelCase : int = torch.load(lowerCamelCase, map_location="""cpu""" )
else:
lowerCamelCase : Optional[int] = torch.load(args.path_to_checkpoint, map_location="""cpu""" )
lowerCamelCase : List[Any] = input_state_dict.get("""args""", lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCamelCase : Union[str, Any] = """gelu_fast"""
elif ds_args.openai_gelu:
lowerCamelCase : Dict = """gelu_new"""
else:
lowerCamelCase : Any = """gelu"""
else:
# in the very early days this used to be "gelu_new"
lowerCamelCase : Tuple = """gelu_new"""
# Spell out all parameters in case the defaults change.
lowerCamelCase : Union[str, Any] = GPTaConfig(
vocab_size=5_0257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096, activation_function=lowerCamelCase, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.0_2, summary_type="""cls_index""", summary_use_proj=lowerCamelCase, summary_activation=lowerCamelCase, summary_proj_to_labels=lowerCamelCase, summary_first_dropout=0.1, scale_attn_weights=lowerCamelCase, use_cache=lowerCamelCase, bos_token_id=5_0256, eos_token_id=5_0256, )
else:
lowerCamelCase : List[Any] = GPTaConfig.from_json_file(args.config_file )
lowerCamelCase : Tuple = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
lowerCamelCase : Any = convert_megatron_checkpoint(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCamelCase, lowerCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowerCamelCase : Optional[int] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCamelCase : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
lowerCamelCase : Tuple = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowerCamelCase : Dict = """gpt2"""
lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase )
lowerCamelCase : int = type(lowerCamelCase ).__name__
lowerCamelCase : Tuple = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(lowerCamelCase )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(lowerCamelCase )
# Store the state_dict to file.
lowerCamelCase : Optional[int] = os.path.join(lowerCamelCase, """pytorch_model.bin""" )
print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(lowerCamelCase, lowerCamelCase )
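# Example invocation (hypothetical script and file names):
# python convert_megatron_gpt2_checkpoint.py release/mp_rank_00/model_optim_rng.pt --print-checkpoint-structure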
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 681 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ):
lowerCamelCase : Tuple = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : List[Any] = image_size
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : Dict = embeddings_size
lowerCamelCase : Optional[int] = hidden_sizes
lowerCamelCase : Union[str, Any] = depths
lowerCamelCase : Optional[Any] = is_training
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Any = num_labels
lowerCamelCase : int = scope
lowerCamelCase : Optional[Any] = len(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ )
lowerCamelCase : Tuple = model(__magic_name__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = self.num_labels
lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ )
lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs
lowerCamelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase : List[str] = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Any = False
def UpperCamelCase__ ( self ):
lowerCamelCase : int = TFResNetModelTester(self )
lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def UpperCamelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__magic_name__ )
lowerCamelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Tuple = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = model_class(__magic_name__ )
lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Tuple = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase : Union[str, Any] = layer_type
lowerCamelCase : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : int = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ):
lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCamelCase : List[str] = self.default_image_processor
lowerCamelCase : str = prepare_img()
lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" )
# forward pass
lowerCamelCase : Tuple = model(**__magic_name__ )
# verify the logits
lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
| 681 | 1 |
from __future__ import annotations
import queue
class A__ :
def __init__( self , __magic_name__ ):
lowerCamelCase : Any = data
lowerCamelCase : Dict = None
lowerCamelCase : str = None
def _a ( ):
print("""\n********Press N to stop entering at any point of time********\n""" )
lowerCamelCase : Optional[Any] = input("""Enter the value of the root node: """ ).strip().lower()
lowerCamelCase : queue.Queue = queue.Queue()
lowerCamelCase : str = TreeNode(int(lowerCamelCase ) )
q.put(lowerCamelCase )
while not q.empty():
lowerCamelCase : Dict = q.get()
lowerCamelCase : Any = F'''Enter the left node of {node_found.data}: '''
lowerCamelCase : int = input(lowerCamelCase ).strip().lower() or """n"""
if check == "n":
return tree_node
lowerCamelCase : Tuple = TreeNode(int(lowerCamelCase ) )
lowerCamelCase : List[Any] = left_node
q.put(lowerCamelCase )
lowerCamelCase : Union[str, Any] = F'''Enter the right node of {node_found.data}: '''
lowerCamelCase : List[Any] = input(lowerCamelCase ).strip().lower() or """n"""
if check == "n":
return tree_node
lowerCamelCase : Dict = TreeNode(int(lowerCamelCase ) )
lowerCamelCase : Dict = right_node
q.put(lowerCamelCase )
raise  # defensive: every expected input path returns inside the loop above
def _a ( lowerCamelCase ):
if not isinstance(lowerCamelCase, lowerCamelCase ) or not node:
return
print(node.data, end=""",""" )
pre_order(node.left )
pre_order(node.right )
def _a ( lowerCamelCase ):
if not isinstance(lowerCamelCase, lowerCamelCase ) or not node:
return
in_order(node.left )
print(node.data, end=""",""" )
in_order(node.right )
def _a ( lowerCamelCase ):
if not isinstance(lowerCamelCase, lowerCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data, end=""",""" )
def _a ( lowerCamelCase ):
if not isinstance(lowerCamelCase, lowerCamelCase ) or not node:
return
lowerCamelCase : queue.Queue = queue.Queue()
q.put(lowerCamelCase )
while not q.empty():
lowerCamelCase : Any = q.get()
print(node_dequeued.data, end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _a ( lowerCamelCase ):
if not isinstance(lowerCamelCase, lowerCamelCase ) or not node:
return
lowerCamelCase : queue.Queue = queue.Queue()
q.put(lowerCamelCase )
while not q.empty():
lowerCamelCase : Any = []
while not q.empty():
lowerCamelCase : Tuple = q.get()
print(node_dequeued.data, end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(lowerCamelCase )
def _a ( lowerCamelCase ):
if not isinstance(lowerCamelCase, lowerCamelCase ) or not node:
return
lowerCamelCase : list[TreeNode] = []
lowerCamelCase : Optional[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data, end=""",""" )
stack.append(lowerCamelCase )
lowerCamelCase : Optional[Any] = n.left
# end of while means current node doesn't have left child
lowerCamelCase : List[str] = stack.pop()
# start to traverse its right child
lowerCamelCase : Any = n.right
def _a ( lowerCamelCase ):
if not isinstance(lowerCamelCase, lowerCamelCase ) or not node:
return
lowerCamelCase : list[TreeNode] = []
lowerCamelCase : Optional[int] = node
while n or stack:
while n:
stack.append(lowerCamelCase )
lowerCamelCase : int = n.left
lowerCamelCase : Dict = stack.pop()
print(n.data, end=""",""" )
lowerCamelCase : List[str] = n.right
def _a ( lowerCamelCase ):
if not isinstance(lowerCamelCase, lowerCamelCase ) or not node:
return
lowerCamelCase , lowerCamelCase : List[Any] = [], []
lowerCamelCase : Tuple = node
stacka.append(lowerCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCamelCase : Any = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(lowerCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data, end=""",""" )
def _a ( lowerCamelCase = "", lowerCamelCase=50, lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase , lowerCamelCase : Union[str, Any] = divmod(width - len(lowerCamelCase ) - 2, 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
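# e.g. prompt("Binary Tree Traversals") pads the 22-character title to width 50:
# divmod(50 - 22 - 2, 2) == (13, 0), giving 13 stars, the spaced title, then 13 stars.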
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
_lowerCamelCase =build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 681 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# Initialise PyTorch model
lowerCamelCase : str = MobileBertConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase : Tuple = MobileBertForPreTraining(lowerCamelCase )
# Load weights from tf checkpoint
lowerCamelCase : Tuple = load_tf_weights_in_mobilebert(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), lowerCamelCase )
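# Example invocation (hypothetical script and file names):
# python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin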
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCamelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 681 | 1 |
from math import factorial
def _a ( lowerCamelCase = 100 ):
return sum(map(lowerCamelCase, str(factorial(lowerCamelCase ) ) ) )
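# e.g. solution(10): factorial(10) == 3628800 and 3+6+2+8+8+0+0 == 27.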
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 681 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _a ( lowerCamelCase ):
# vision encoder
if "img_encoder.pos_embed" in name:
lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" )
if "img_encoder.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" )
if "ln_1" in name:
lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" )
if "ln_2" in name:
lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" )
if "c_fc" in name:
lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" )
if "c_proj" in name:
lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" )
if "text_encoder" in name:
lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" )
if "ln_final" in name:
lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" )
if "img_projector.linear_out." in name:
lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" )
if "text_projector.linear_out" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" )
return name
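# For illustration, a hypothetical vision-encoder key walks through the rules above as
# "img_encoder.layers.0.blocks.0.attn.qkv.weight"
# -> "vision_model.encoder.stages.0.layers.0.self_attn.qkv.weight"
# ("layers" -> "encoder.stages", "blocks" -> "layers", "attn" -> "self_attn"; "qkv" skips the "proj" rule).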
def _a ( lowerCamelCase, lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : Any = key.split(""".""" )
lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] )
lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase : int = val[:dim, :]
lowerCamelCase : List[str] = val[dim : dim * 2, :]
lowerCamelCase : Dict = val[-dim:, :]
else:
lowerCamelCase : List[Any] = val[:dim]
lowerCamelCase : List[Any] = val[dim : dim * 2]
lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : str = key.split(""".""" )
lowerCamelCase : Optional[int] = int(key_split[3] )
lowerCamelCase : List[str] = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase : Optional[int] = val[:dim, :]
lowerCamelCase : Any = val[
dim : dim * 2, :
]
lowerCamelCase : Optional[Any] = val[-dim:, :]
else:
lowerCamelCase : Union[str, Any] = val[:dim]
lowerCamelCase : Optional[int] = val[dim : dim * 2]
lowerCamelCase : Union[str, Any] = val[-dim:]
else:
lowerCamelCase : List[Any] = rename_key(lowerCamelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase : Any = val.squeeze_()
else:
lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def _a ( ):
lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ):
lowerCamelCase : int = GroupViTConfig()
lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval()
lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""]
lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase )
lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0)
# verify result
lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowerCamelCase : int = prepare_img()
lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" )
with torch.no_grad():
lowerCamelCase : int = model(**lowerCamelCase )
if model_name == "groupvit-gcc-yfcc":
lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 )
processor.save_pretrained(lowerCamelCase )
model.save_pretrained(lowerCamelCase )
print("""Successfully saved processor and model to""", lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCamelCase, organization="""nielsr""" )
model.push_to_hub(lowerCamelCase, organization="""nielsr""" )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_lowerCamelCase =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 681 | 1 |
def _a ( lowerCamelCase ):
if n_term == "":
return []
lowerCamelCase : list = []
for temp in range(int(lowerCamelCase ) ):
series.append(F'''1/{temp + 1}''' if series else """1""" )
return series
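# e.g. with n_term "4" the function above returns ["1", "1/2", "1/3", "1/4"].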
if __name__ == "__main__":
_lowerCamelCase =input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 681 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class A__ :
# setable values
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def UpperCamelCase__ ( cls ):
return cls()
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : KarrasVeSchedulerState
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
return True
@register_to_config
def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ):
pass
def UpperCamelCase__ ( self ):
return KarrasVeSchedulerState.create()
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ):
lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy()
lowerCamelCase : int = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase : Dict = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 )
lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape )
lowerCamelCase : List[Any] = sigma + gamma * sigma
lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
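# Worked example of the "churn" above: with the default s_churn=80 and, say, 50 inference steps,
# gamma = min(80 / 50, 2**0.5 - 1) ~= 0.414, so sigma_hat ~= 1.414 * sigma
# (when sigma lies in [s_min, s_max]; otherwise gamma is 0 and the noise term vanishes).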
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output
lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : str = sample_prev + sigma_prev * model_output
lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
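# The correction above is the second-order (Heun) update from Karras et al. (2022), Algorithm 2:
# it re-evaluates the derivative at the Euler prediction and averages the two slopes,
# 0.5 * derivative + 0.5 * derivative_corr, before retaking the step from sample_hat.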
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
raise NotImplementedError()
| 681 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : List[Any] = XLMProphetNetTokenizer
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Union[str, Any] = True
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = XLMProphetNetTokenizer(__magic_name__ , keep_accents=__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """[PAD]"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """[PAD]""" )
self.assertEqual(vocab_keys[1] , """[CLS]""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__magic_name__ ) , 1_0_1_2 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = XLMProphetNetTokenizer(__magic_name__ , keep_accents=__magic_name__ )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
lowerCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
@cached_property
def UpperCamelCase__ ( self ):
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = """Hello World!"""
lowerCamelCase : List[Any] = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@slow
def UpperCamelCase__ ( self ):
# fmt: off
lowerCamelCase : str = {"""input_ids""": [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 681 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[str] = k_size // 2
lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCamelCase : Optional[Any] = 1 / (2 * pi * square(lowerCamelCase )) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) )  # 2D Gaussian normalizes by 2*pi*sigma^2
return g
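# Standalone check of the kernel above (hypothetical values): for k_size=3, sigma=1 the grid
# offsets run over -1..1, the center weight is 1 / (2 * pi) ~= 0.159, and the 3x3 weights sum
# to about 0.78 rather than 1, so the filtered image comes out slightly darker than the input.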
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1]
# dst image height and width
lowerCamelCase : Dict = height - k_size + 1
lowerCamelCase : str = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
lowerCamelCase : List[Any] = 0
for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ):
lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] )
lowerCamelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase )
lowerCamelCase : str = ravel(lowerCamelCase )
# reshape and get the dst image
lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(lowerCamelCase )
return dst
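# Shape sketch of the im2col step above (hypothetical sizes): a 512x512 image with k_size=3
# gives dst_height = dst_width = 510, so image_array is (510 * 510, 9) = (260100, 9) and the
# dot product with the flattened (9,) kernel reshapes back to a (510, 510) output.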
if __name__ == "__main__":
# read original image
_lowerCamelCase =imread(R"""../image_data/lena.jpg""")
# turn image in gray scale value
_lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_lowerCamelCase =gaussian_filter(gray, 3, sigma=1)
_lowerCamelCase =gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("""gaussian filter with 3x3 mask""", gaussianaxa)
imshow("""gaussian filter with 5x5 mask""", gaussianaxa)
waitKey()
| 681 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = """decision_transformer"""
_UpperCAmelCase : str = ["""past_key_values"""]
_UpperCAmelCase : Any = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __magic_name__=1_7 , __magic_name__=4 , __magic_name__=1_2_8 , __magic_name__=4_0_9_6 , __magic_name__=True , __magic_name__=1 , __magic_name__=1_0_2_4 , __magic_name__=3 , __magic_name__=1 , __magic_name__=None , __magic_name__="relu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , __magic_name__=False , __magic_name__=False , **__magic_name__ , ):
lowerCamelCase : Optional[int] = state_dim
lowerCamelCase : int = act_dim
lowerCamelCase : int = hidden_size
lowerCamelCase : Union[str, Any] = max_ep_len
lowerCamelCase : Optional[int] = action_tanh
lowerCamelCase : Any = vocab_size
lowerCamelCase : List[str] = n_positions
lowerCamelCase : List[Any] = n_layer
lowerCamelCase : Dict = n_head
lowerCamelCase : Optional[Any] = n_inner
lowerCamelCase : Tuple = activation_function
lowerCamelCase : Tuple = resid_pdrop
lowerCamelCase : str = embd_pdrop
lowerCamelCase : Dict = attn_pdrop
lowerCamelCase : Tuple = layer_norm_epsilon
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Tuple = scale_attn_weights
lowerCamelCase : str = use_cache
lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx
lowerCamelCase : List[str] = reorder_and_upcast_attn
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : str = eos_token_id
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
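# A minimal instantiation sketch using the original class name (hypothetical usage):
# config = DecisionTransformerConfig(state_dim=17, act_dim=4)
# keeps the defaults above: hidden_size=128, max_ep_len=4096, n_layer=3, n_head=1.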
| 681 |
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = dataset_loading_script_name
lowerCamelCase : Dict = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase )
lowerCamelCase : str = script_dir / F'''{script_name}.py'''
with open(lowerCamelCase, """w""" ) as f:
f.write(lowerCamelCase )
return str(lowerCamelCase )
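# A hypothetical test consuming the fixture above (fixture names follow the original,
# un-obfuscated definitions; sketch only):
# def test_dummy_dataset_script(dataset_loading_script_dir):
#     assert DATASET_LOADING_SCRIPT_NAME in dataset_loading_script_dir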
| 681 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
_lowerCamelCase ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
_lowerCamelCase ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[Any] = (images / 2 + 0.5).clamp(0, 1 )
lowerCamelCase : Optional[Any] = images.cpu().permute(0, 2, 3, 1 ).float().numpy()
lowerCamelCase : Any = numpy_to_pil(lowerCamelCase )
return images
def _a ( lowerCamelCase ):
if images.ndim == 3:
lowerCamelCase : Optional[Any] = images[None, ...]
lowerCamelCase : List[Any] = (images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCamelCase : Optional[int] = [Image.fromarray(image.squeeze(), mode="""L""" ) for image in images]
else:
lowerCamelCase : int = [Image.fromarray(lowerCamelCase ) for image in images]
return pil_images
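# Usage sketch for the numpy -> PIL helper above (standalone; shapes are hypothetical):
# import numpy as np
# batch = np.random.rand(2, 64, 64, 3) # floats in [0, 1], channels-last
# pils = numpy_to_pil(batch) # -> list of two 64x64 RGB PIL images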
| 681 | 1 |
from math import factorial
def _a ( lowerCamelCase = 100 ):
return sum(int(lowerCamelCase ) for x in str(factorial(lowerCamelCase ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 681 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class A__ ( nn.Module):
def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ):
super().__init__()
lowerCamelCase : Any = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCamelCase : Any = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCamelCase : List[Any] = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCamelCase : Optional[int] = [1, 0]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ):
lowerCamelCase : List[Any] = hidden_states
lowerCamelCase : Dict = []
lowerCamelCase : List[Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i]
lowerCamelCase : List[Any] = self.transformers[transformer_index](
__magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCamelCase : Dict = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__magic_name__ )
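# Blend arithmetic for the dual transformer above: each branch stores its residual
# (encoded_state - input_states), the two residuals are mixed as
# mix_ratio * r0 + (1 - mix_ratio) * r1 (0.5 by default), and input_states is added back;
# with mix_ratio=1.0 only the first condition's branch would contribute.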
| 681 | 1 |
def _a ( lowerCamelCase ):
if not numbers:
return 0
if not isinstance(lowerCamelCase, (list, tuple) ) or not all(
isinstance(lowerCamelCase, lowerCamelCase ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
lowerCamelCase : Any = numbers[0]
for i in range(1, len(lowerCamelCase ) ):
# update the maximum and minimum subarray products
lowerCamelCase : str = numbers[i]
if number < 0:
lowerCamelCase , lowerCamelCase : Dict = min_till_now, max_till_now
lowerCamelCase : List[Any] = max(lowerCamelCase, max_till_now * number )
lowerCamelCase : Dict = min(lowerCamelCase, min_till_now * number )
# update the maximum product found till now
lowerCamelCase : Any = max(lowerCamelCase, lowerCamelCase )
return max_prod
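# e.g. for [-2, -3, 4] the function above returns 24 (the whole array): tracking the running
# minimum product as well lets the two negatives combine, which a max-only scan would miss.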
| 681 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase ="""▁"""
_lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : str = BertGenerationTokenizer
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : List[Any] = True
def UpperCamelCase__ ( self ):
super().setUp()
lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """<s>"""
lowerCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(__magic_name__ ) , 1_0_0_2 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCamelCase__ ( self ):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """Hello World!"""
lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : str = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCamelCase : str = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@require_torch
@slow
def UpperCamelCase__ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowerCamelCase : Dict = """ """.join(__magic_name__ )
lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : Tuple = BertGenerationConfig()
lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__magic_name__ )
model(**__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
# fmt: off
lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 681 | 1 |
import os
# Precompute a list of the first 100 triangular numbers
_lowerCamelCase =[int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def _a ( ):
lowerCamelCase : Optional[int] = os.path.dirname(os.path.realpath(lowerCamelCase ) )
lowerCamelCase : int = os.path.join(lowerCamelCase, """words.txt""" )
lowerCamelCase : int = """"""
with open(lowerCamelCase ) as f:
lowerCamelCase : Union[str, Any] = f.readline()
lowerCamelCase : List[str] = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
lowerCamelCase : Tuple = [
word
for word in [sum(ord(lowerCamelCase ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowerCamelCase )
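# Worked example: the word "SKY" has value 19 + 11 + 25 = 55, which is the
# 10th triangular number (0.5 * 10 * 11 = 55), so "SKY" is counted.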
if __name__ == "__main__":
print(solution())
| 681 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
_lowerCamelCase =HfArgumentParser(InitializationArguments)
_lowerCamelCase =parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
_lowerCamelCase =AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_lowerCamelCase ={
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
_lowerCamelCase =AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
_lowerCamelCase =AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 681 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : List[str] = StableDiffusionInpaintPipeline
_UpperCAmelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_UpperCAmelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCAmelCase : Optional[Any] = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_UpperCAmelCase : Tuple = frozenset([])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
lowerCamelCase : Tuple = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=__magic_name__ , )
lowerCamelCase : Optional[int] = PNDMScheduler(skip_prk_steps=__magic_name__ )
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
lowerCamelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
lowerCamelCase : Dict = CLIPTextModel(__magic_name__ )
lowerCamelCase : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
lowerCamelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase : Tuple = Image.fromarray(np.uinta(__magic_name__ ) ).convert("""RGB""" ).resize((6_4, 6_4) )
lowerCamelCase : List[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(__magic_name__ ).startswith("""mps""" ):
lowerCamelCase : List[str] = torch.manual_seed(__magic_name__ )
else:
lowerCamelCase : int = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
lowerCamelCase : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : str = StableDiffusionInpaintPipeline(**__magic_name__ )
lowerCamelCase : Union[str, Any] = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : str = sd_pipe(**__magic_name__ ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
lowerCamelCase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
lowerCamelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
lowerCamelCase : Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
lowerCamelCase : str = StableDiffusionInpaintPipeline.from_pretrained(__magic_name__ , safety_checker=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing()
lowerCamelCase : int = """Face of a yellow cat, high resolution, sitting on a park bench"""
lowerCamelCase : Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase : int = pipe(
prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , generator=__magic_name__ , output_type="""np""" , )
lowerCamelCase : List[str] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
lowerCamelCase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
lowerCamelCase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
lowerCamelCase : Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
lowerCamelCase : Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(
__magic_name__ , torch_dtype=torch.floataa , safety_checker=__magic_name__ , )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing()
lowerCamelCase : Dict = """Face of a yellow cat, high resolution, sitting on a park bench"""
lowerCamelCase : Optional[int] = torch.manual_seed(0 )
lowerCamelCase : List[Any] = pipe(
prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , generator=__magic_name__ , output_type="""np""" , )
lowerCamelCase : Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCamelCase__ ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
lowerCamelCase : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
lowerCamelCase : Dict = """stabilityai/stable-diffusion-2-inpainting"""
lowerCamelCase : Any = PNDMScheduler.from_pretrained(__magic_name__ , subfolder="""scheduler""" )
lowerCamelCase : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
__magic_name__ , safety_checker=__magic_name__ , scheduler=__magic_name__ , torch_dtype=torch.floataa , )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase : Dict = """Face of a yellow cat, high resolution, sitting on a park bench"""
lowerCamelCase : Dict = torch.manual_seed(0 )
lowerCamelCase : Optional[int] = pipe(
prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , generator=__magic_name__ , num_inference_steps=2 , output_type="""np""" , )
lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 681 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self , __magic_name__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """sshleifer/tiny-gpt2"""
lowerCamelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = """sgugger/tiny-distilbert-classification"""
lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , )
lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """sshleifer/tiny-gpt2"""
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """sshleifer/tiny-gpt2"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] )
lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , )
lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ )
benchmark.run()
self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(__magic_name__ ):
self.assertTrue(hasattr(__magic_name__ , """sequential""" ) )
self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) )
self.assertTrue(hasattr(__magic_name__ , """current""" ) )
self.assertTrue(hasattr(__magic_name__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
| 681 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase ={
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
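# Hypothetical usage sketch (import path assumed): attribute access on the lazy
# module triggers the real import, so
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseTokenizer
# only loads the tokenization submodule at that point, keeping import time low.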
| 681 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def _a ( lowerCamelCase ):
return x + 2
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """x = 3"""
lowerCamelCase : Tuple = {}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
lowerCamelCase : Optional[int] = """x = y"""
lowerCamelCase : Tuple = {"""y""": 5}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """y = add_two(x)"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """x = 3"""
lowerCamelCase : Dict = {}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """x = 3\ny = 5"""
lowerCamelCase : Optional[int] = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """text = f'This is x: {x}.'"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5"""
lowerCamelCase : Tuple = {"""x""": 3}
lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} )
lowerCamelCase : Tuple = {"""x""": 8}
lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = """test_list = [x, add_two(x)]"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """y = x"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]"""
lowerCamelCase : Any = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
lowerCamelCase : Dict = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i"""
lowerCamelCase : int = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
| 681 | 1 |
import heapq
def _a ( lowerCamelCase ):
lowerCamelCase : list[list] = []
# for each node and its adjacency list, add the node and its rank to the queue
# using the heapq module, the queue is filled like a priority queue
# heapq implements a min priority queue, so -1 * len(v) is used to simulate a max priority queue
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase, [-1 * len(lowerCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowerCamelCase : Tuple = set()
# while the queue isn't empty and there are still edges
# (queue[0][0] is the negated rank of the node with the highest rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowerCamelCase : List[Any] = heapq.heappop(lowerCamelCase )[1][0]
chosen_vertices.add(lowerCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
lowerCamelCase : List[str] = elem[1][1].index(lowerCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase )
return chosen_vertices
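# A standalone illustration of the negative-key trick used above (sketch, not
# part of the original solution): heapq is a min-heap, so negating the
# priority makes the entry with the largest original key pop first.
#
#   q: list = []
#   heapq.heappush(q, [-3, "high"])
#   heapq.heappush(q, [-1, "low"])
#   heapq.heappop(q)  # -> [-3, "high"]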
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase ={0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 681 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = """decision_transformer"""
_UpperCAmelCase : str = ["""past_key_values"""]
_UpperCAmelCase : Any = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __magic_name__=1_7 , __magic_name__=4 , __magic_name__=1_2_8 , __magic_name__=4_0_9_6 , __magic_name__=True , __magic_name__=1 , __magic_name__=1_0_2_4 , __magic_name__=3 , __magic_name__=1 , __magic_name__=None , __magic_name__="relu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , __magic_name__=False , __magic_name__=False , **__magic_name__ , ):
lowerCamelCase : Optional[int] = state_dim
lowerCamelCase : int = act_dim
lowerCamelCase : int = hidden_size
lowerCamelCase : Union[str, Any] = max_ep_len
lowerCamelCase : Optional[int] = action_tanh
lowerCamelCase : Any = vocab_size
lowerCamelCase : List[str] = n_positions
lowerCamelCase : List[Any] = n_layer
lowerCamelCase : Dict = n_head
lowerCamelCase : Optional[Any] = n_inner
lowerCamelCase : Tuple = activation_function
lowerCamelCase : Tuple = resid_pdrop
lowerCamelCase : str = embd_pdrop
lowerCamelCase : Dict = attn_pdrop
lowerCamelCase : Tuple = layer_norm_epsilon
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Tuple = scale_attn_weights
lowerCamelCase : str = use_cache
lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx
lowerCamelCase : List[str] = reorder_and_upcast_attn
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : str = eos_token_id
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
| 681 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : jnp.ndarray
class A__ ( nn.Module):
_UpperCAmelCase : int
_UpperCAmelCase : Tuple[int] = (16, 32, 96, 256)
_UpperCAmelCase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ):
lowerCamelCase : str = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase : Optional[int] = []
for i in range(len(self.block_out_channels ) - 1 ):
lowerCamelCase : int = self.block_out_channels[i]
lowerCamelCase : Union[str, Any] = self.block_out_channels[i + 1]
lowerCamelCase : List[str] = nn.Conv(
__magic_name__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__magic_name__ )
lowerCamelCase : int = nn.Conv(
__magic_name__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__magic_name__ )
lowerCamelCase : Optional[Any] = blocks
lowerCamelCase : List[Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __magic_name__ ):
lowerCamelCase : Tuple = self.conv_in(__magic_name__ )
lowerCamelCase : List[Any] = nn.silu(__magic_name__ )
for block in self.blocks:
lowerCamelCase : Union[str, Any] = block(__magic_name__ )
lowerCamelCase : Union[str, Any] = nn.silu(__magic_name__ )
lowerCamelCase : Dict = self.conv_out(__magic_name__ )
return embedding
@flax_register_to_config
class A__ ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : int = 32
_UpperCAmelCase : int = 4
_UpperCAmelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCAmelCase : Union[bool, Tuple[bool]] = False
_UpperCAmelCase : Tuple[int] = (320, 640, 1280, 1280)
_UpperCAmelCase : int = 2
_UpperCAmelCase : Union[int, Tuple[int]] = 8
_UpperCAmelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCAmelCase : int = 1280
_UpperCAmelCase : float = 0.0
_UpperCAmelCase : bool = False
_UpperCAmelCase : jnp.dtype = jnp.floataa
_UpperCAmelCase : bool = True
_UpperCAmelCase : int = 0
_UpperCAmelCase : str = "rgb"
_UpperCAmelCase : Tuple[int] = (16, 32, 96, 256)
def UpperCamelCase__ ( self , __magic_name__ ):
# init input tensors
lowerCamelCase : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase : Any = jnp.zeros(__magic_name__ , dtype=jnp.floataa )
lowerCamelCase : Union[str, Any] = jnp.ones((1,) , dtype=jnp.intaa )
lowerCamelCase : int = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCamelCase : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCamelCase : Union[str, Any] = jnp.zeros(__magic_name__ , dtype=jnp.floataa )
lowerCamelCase , lowerCamelCase : Any = jax.random.split(__magic_name__ )
lowerCamelCase : List[Any] = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )["params"]
def UpperCamelCase__ ( self ):
lowerCamelCase : str = self.block_out_channels
lowerCamelCase : Optional[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase : List[Any] = self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase : str = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCamelCase : Optional[int] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCamelCase : str = FlaxTimestepEmbedding(__magic_name__ , dtype=self.dtype )
lowerCamelCase : str = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCamelCase : Optional[int] = self.only_cross_attention
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : Dict = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : Optional[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase : Tuple = []
lowerCamelCase : List[str] = []
lowerCamelCase : List[Any] = block_out_channels[0]
lowerCamelCase : Any = nn.Conv(
__magic_name__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__magic_name__ )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase : Union[str, Any] = output_channel
lowerCamelCase : int = block_out_channels[i]
lowerCamelCase : List[Any] = i == len(__magic_name__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase : Tuple = FlaxCrossAttnDownBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowerCamelCase : Dict = FlaxDownBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__magic_name__ )
for _ in range(self.layers_per_block ):
lowerCamelCase : Optional[int] = nn.Conv(
__magic_name__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__magic_name__ )
if not is_final_block:
lowerCamelCase : Optional[int] = nn.Conv(
__magic_name__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__magic_name__ )
lowerCamelCase : Union[str, Any] = down_blocks
lowerCamelCase : Dict = controlnet_down_blocks
# mid
lowerCamelCase : Dict = block_out_channels[-1]
lowerCamelCase : List[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=__magic_name__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCamelCase : Tuple = nn.Conv(
__magic_name__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 1.0 , __magic_name__ = True , __magic_name__ = False , ):
lowerCamelCase : Any = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCamelCase : Any = jnp.flip(__magic_name__ , axis=1 )
# 1. time
if not isinstance(__magic_name__ , jnp.ndarray ):
lowerCamelCase : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__magic_name__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase : List[str] = timesteps.astype(dtype=jnp.floataa )
lowerCamelCase : Optional[Any] = jnp.expand_dims(__magic_name__ , 0 )
lowerCamelCase : Union[str, Any] = self.time_proj(__magic_name__ )
lowerCamelCase : str = self.time_embedding(__magic_name__ )
# 2. pre-process
lowerCamelCase : str = jnp.transpose(__magic_name__ , (0, 2, 3, 1) )
lowerCamelCase : Optional[Any] = self.conv_in(__magic_name__ )
lowerCamelCase : Optional[int] = jnp.transpose(__magic_name__ , (0, 2, 3, 1) )
lowerCamelCase : Optional[int] = self.controlnet_cond_embedding(__magic_name__ )
sample += controlnet_cond
# 3. down
lowerCamelCase : Optional[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase , lowerCamelCase : Optional[Any] = down_block(__magic_name__ , __magic_name__ , __magic_name__ , deterministic=not train )
else:
lowerCamelCase , lowerCamelCase : Union[str, Any] = down_block(__magic_name__ , __magic_name__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCamelCase : str = self.mid_block(__magic_name__ , __magic_name__ , __magic_name__ , deterministic=not train )
# 5. controlnet blocks
lowerCamelCase : List[Any] = ()
for down_block_res_sample, controlnet_block in zip(__magic_name__ , self.controlnet_down_blocks ):
lowerCamelCase : Tuple = controlnet_block(__magic_name__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase : List[Any] = controlnet_down_block_res_samples
lowerCamelCase : Tuple = self.controlnet_mid_block(__magic_name__ )
# 6. scaling
lowerCamelCase : Optional[Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__magic_name__ , mid_block_res_sample=__magic_name__ )
| 681 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowerCamelCase =logging.get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = question_encoder
lowerCamelCase : Dict = generator
lowerCamelCase : Tuple = self.question_encoder
def UpperCamelCase__ ( self , __magic_name__ ):
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" )
lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(__magic_name__ )
self.generator.save_pretrained(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ )
if config is None:
lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ )
lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
lowerCamelCase : Any = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=__magic_name__ , generator=__magic_name__ )
def __call__( self , *__magic_name__ , **__magic_name__ ):
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.question_encoder
def UpperCamelCase__ ( self ):
lowerCamelCase : str = self.generator
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , __magic_name__ , )
if max_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : int = self(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : Dict = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
lowerCamelCase : List[Any] = labels["""input_ids"""]
return model_inputs
| 681 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class A__ :
_UpperCAmelCase : CommonSchedulerState
# setable values
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : Optional[int] = None
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , __magic_name__ ):
return cls(common=__magic_name__ , init_noise_sigma=__magic_name__ , timesteps=__magic_name__ )
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : DDPMSchedulerState
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[Any] = [e.name for e in FlaxKarrasDiffusionSchedulers]
_UpperCAmelCase : jnp.dtype
@property
def UpperCamelCase__ ( self ):
return True
@register_to_config
def __init__( self , __magic_name__ = 1_0_0_0 , __magic_name__ = 0.0_001 , __magic_name__ = 0.02 , __magic_name__ = "linear" , __magic_name__ = None , __magic_name__ = "fixed_small" , __magic_name__ = True , __magic_name__ = "epsilon" , __magic_name__ = jnp.floataa , ):
lowerCamelCase : Tuple = dtype
def UpperCamelCase__ ( self , __magic_name__ = None ):
if common is None:
lowerCamelCase : List[Any] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCamelCase : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
lowerCamelCase : Optional[int] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__magic_name__ , init_noise_sigma=__magic_name__ , timesteps=__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None ):
return sample
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ):
lowerCamelCase : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by the ratio
# rounding avoids issues when num_inference_steps is a power of 3
lowerCamelCase : List[str] = (jnp.arange(0 , __magic_name__ ) * step_ratio).round()[::-1]
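# worked example (assumed values): with num_train_timesteps=1000 and
# num_inference_steps=50, step_ratio=20 and the schedule becomes
# [980, 960, ..., 20, 0]: 50 evenly spaced timesteps in reverse order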
return state.replace(
num_inference_steps=__magic_name__ , timesteps=__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ):
lowerCamelCase : Dict = state.common.alphas_cumprod[t]
lowerCamelCase : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCamelCase : List[Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
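# i.e. the posterior variance beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
# of q(x_{t-1} | x_t, x_0) from the DDPM paper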
if variance_type is None:
lowerCamelCase : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCamelCase : Dict = jnp.clip(__magic_name__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCamelCase : Dict = jnp.log(jnp.clip(__magic_name__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCamelCase : List[str] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCamelCase : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCamelCase : int = variance
lowerCamelCase : int = state.common.betas[t]
lowerCamelCase : List[Any] = (predicted_variance + 1) / 2
lowerCamelCase : int = frac * max_log + (1 - frac) * min_log
return variance
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = True , ):
lowerCamelCase : Dict = timestep
if key is None:
lowerCamelCase : Optional[Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCamelCase , lowerCamelCase : Any = jnp.split(__magic_name__ , sample.shape[1] , axis=1 )
else:
lowerCamelCase : Any = None
# 1. compute alphas, betas
lowerCamelCase : Tuple = state.common.alphas_cumprod[t]
lowerCamelCase : Union[str, Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCamelCase : Any = 1 - alpha_prod_t
lowerCamelCase : List[Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCamelCase : str = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
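# i.e. x_0 = (x_t - sqrt(1 - alpha_bar_t) * eps_theta) / sqrt(alpha_bar_t), since beta_prod_t = 1 - alpha_prod_t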
elif self.config.prediction_type == "sample":
lowerCamelCase : List[str] = model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase : str = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCamelCase : Optional[Any] = jnp.clip(__magic_name__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase : Dict = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCamelCase : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCamelCase : str = jax.random.split(__magic_name__ , num=1 )
lowerCamelCase : List[Any] = jax.random.normal(__magic_name__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__magic_name__ , __magic_name__ , predicted_variance=__magic_name__ ) ** 0.5) * noise
lowerCamelCase : Union[str, Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCamelCase : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
return add_noise_common(state.common , __magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
return get_velocity_common(state.common , __magic_name__ , __magic_name__ , __magic_name__ )
def __len__( self ):
return self.config.num_train_timesteps
| 681 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[Any] = F'''{sampling_rate}'''
lowerCamelCase : Optional[int] = """1"""
lowerCamelCase : Any = """f32le"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
lowerCamelCase : Union[str, Any] = output_stream[0]
lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
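# Hypothetical usage sketch (file name assumed; the decode helper above is
# obfuscated as `_a` in this snippet):
#
#   with open("speech.wav", "rb") as f:
#       audio = _a(f.read(), 16_000)  # mono float32 samples at 16 kHz
#   print(audio.shape, audio.dtype)   # (num_samples,), float32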
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ):
lowerCamelCase : Dict = F'''{sampling_rate}'''
lowerCamelCase : List[Any] = """1"""
if format_for_conversion == "s16le":
lowerCamelCase : Any = 2
elif format_for_conversion == "f32le":
lowerCamelCase : Dict = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase : Dict = platform.system()
if system == "Linux":
lowerCamelCase : Union[str, Any] = """alsa"""
lowerCamelCase : List[Any] = """default"""
elif system == "Darwin":
lowerCamelCase : List[Any] = """avfoundation"""
lowerCamelCase : List[Any] = """:0"""
elif system == "Windows":
lowerCamelCase : int = """dshow"""
lowerCamelCase : Any = """default"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
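# worked example (assumed values): at sampling_rate=16_000, chunk_length_s=1.0
# and format_for_conversion="f32le" (4 bytes per sample),
# chunk_len = 16_000 * 4 = 64_000 bytes per yielded chunk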
lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase )
for item in iterator:
yield item
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ):
if stream_chunk_s is not None:
lowerCamelCase : int = stream_chunk_s
else:
lowerCamelCase : Dict = chunk_length_s
lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase )
if format_for_conversion == "s16le":
lowerCamelCase : Optional[int] = np.intaa
lowerCamelCase : Optional[Any] = 2
elif format_for_conversion == "f32le":
lowerCamelCase : int = np.floataa
lowerCamelCase : Any = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowerCamelCase : Any = chunk_length_s / 6
lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCamelCase, (int, float) ):
lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s]
lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCamelCase : List[Any] = datetime.datetime.now()
lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase )
for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ):
# Put everything back in numpy scale
lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase )
lowerCamelCase : List[Any] = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
lowerCamelCase : Tuple = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
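# Striding note (assumption): with the default stride_length_s = chunk_length_s / 6,
# consecutive chunks overlap on both sides so a downstream model can discard the
# unreliable chunk edges; chunks arriving more than 10 deltas late are skipped entirely.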
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ):
lowerCamelCase : Optional[int] = B""""""
lowerCamelCase , lowerCamelCase : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowerCamelCase : str = 0
for raw in iterator:
acc += raw
if stream and len(lowerCamelCase ) < chunk_len:
lowerCamelCase : Optional[int] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
lowerCamelCase : str = (_stride_left, stride_right)
lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
lowerCamelCase : Optional[int] = False
yield item
lowerCamelCase : str = stride_left
lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCamelCase ) > stride_left:
lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
lowerCamelCase : List[Any] = False
yield item
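# Worked example (not from the original): chunk_len=6, stride=(2, 2), stream=False over
# the byte stream b"abcdefgh" yields
#   {"raw": b"abcdef", "stride": (0, 2)}, then {"raw": b"cdefgh", "stride": (2, 2)},
#   and finally the tail {"raw": b"efgh", "stride": (2, 0)},
# since the window advances by chunk_len - stride_left - stride_right = 2 bytes per chunk.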
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Optional[int] = 2**24 # 16 MB
try:
with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process:
while True:
lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 681 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
_lowerCamelCase =random.Random()
def _a ( lowerCamelCase, lowerCamelCase=1.0, lowerCamelCase=None, lowerCamelCase=None ):
if rng is None:
lowerCamelCase : List[Any] = global_rng
lowerCamelCase : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
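# Shape note (assumption): floats_list(shape, scale=1.0, rng=None, ...) returns shape[0]
# lists of shape[1] floats drawn from rng.random() * scale, falling back to the
# module-level global_rng so fixtures stay reproducible across tests.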
@require_torch
class A__ ( unittest.TestCase):
def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=4_0_0 , __magic_name__=2_0_0_0 , __magic_name__=1 , __magic_name__=0.0 , __magic_name__=1_6_0_0_0 , __magic_name__=True , __magic_name__=8_0 , __magic_name__=1_6 , __magic_name__=6_4 , __magic_name__="hann_window" , __magic_name__=8_0 , __magic_name__=7_6_0_0 , __magic_name__=1e-10 , __magic_name__=True , ):
lowerCamelCase : Tuple = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : str = min_seq_length
lowerCamelCase : int = max_seq_length
lowerCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase : Optional[int] = feature_size
lowerCamelCase : Optional[Any] = padding_value
lowerCamelCase : List[str] = sampling_rate
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : str = num_mel_bins
lowerCamelCase : Optional[int] = hop_length
lowerCamelCase : Any = win_length
lowerCamelCase : List[str] = win_function
lowerCamelCase : Dict = fmin
lowerCamelCase : Optional[Any] = fmax
lowerCamelCase : Dict = mel_floor
lowerCamelCase : str = return_attention_mask
def UpperCamelCase__ ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False ):
def _flatten(__magic_name__ ):
return list(itertools.chain(*__magic_name__ ) )
if equal_length:
lowerCamelCase : Optional[int] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCamelCase : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase : str = [np.asarray(__magic_name__ ) for x in speech_inputs]
return speech_inputs
def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False ):
if equal_length:
lowerCamelCase : int = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase : int = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase : List[str] = [np.asarray(__magic_name__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : int = SpeechTaFeatureExtractor
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = SpeechTaFeatureExtractionTester(self )
def UpperCamelCase__ ( self , __magic_name__ ):
self.assertTrue(np.all(np.mean(__magic_name__ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__magic_name__ , axis=0 ) - 1 ) < 1e-3 ) )
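# Normalisation note (assumption): this helper asserts the extractor standardised each
# utterance roughly as x -> (x - mean(x)) / sqrt(var(x) + eps), i.e. mean ~0 and
# variance ~1 along the time axis within a 1e-3 tolerance.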
def UpperCamelCase__ ( self ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCamelCase : str = [np.asarray(__magic_name__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCamelCase : int = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
lowerCamelCase : List[str] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )
# Test batched
lowerCamelCase : Dict = feat_extract(__magic_name__ , return_tensors="""np""" ).input_values
lowerCamelCase : Tuple = feat_extract(__magic_name__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ):
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase : str = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCamelCase : int = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase : List[Any] = [None, 1_6_0_0, None]
for max_length, padding in zip(__magic_name__ , __magic_name__ ):
lowerCamelCase : Dict = feat_extract(__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , return_tensors="""np""" )
lowerCamelCase : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase : str = range(8_0_0 , 1_4_0_0 , 2_0_0 )
lowerCamelCase : Dict = [floats_list((1, x) )[0] for x in lengths]
lowerCamelCase : str = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase : int = [None, 1_6_0_0, None]
for max_length, padding in zip(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[str] = feat_extract(__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ )
lowerCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCamelCase : Tuple = feat_extract(
__magic_name__ , truncation=__magic_name__ , max_length=1_0_0_0 , padding="""max_length""" , return_tensors="""np""" )
lowerCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCamelCase : Any = feat_extract(
__magic_name__ , truncation=__magic_name__ , max_length=1_0_0_0 , padding="""longest""" , return_tensors="""np""" )
lowerCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> inputs are truncated/padded to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
lowerCamelCase : str = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCamelCase : Union[str, Any] = feat_extract(
__magic_name__ , truncation=__magic_name__ , max_length=2_0_0_0 , padding="""longest""" , return_tensors="""np""" )
lowerCamelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase : Optional[Any] = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCamelCase : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCamelCase : List[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCamelCase__ ( self ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCamelCase : Tuple = [np.asarray(__magic_name__ ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase : Any = feature_extractor(audio_target=__magic_name__ , padding=__magic_name__ , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowerCamelCase : str = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
lowerCamelCase : int = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )
# Test batched
lowerCamelCase : List[Any] = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_values
lowerCamelCase : List[Any] = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ):
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCamelCase : Tuple = np.asarray(__magic_name__ )
lowerCamelCase : Any = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_values
lowerCamelCase : Tuple = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ):
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase : int = feat_extract.model_input_names[0]
lowerCamelCase : List[Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__magic_name__ ) == len(__magic_name__ ) for x, y in zip(__magic_name__ , processed_features[input_name] ) ) )
lowerCamelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__magic_name__ )
lowerCamelCase : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
lowerCamelCase : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__magic_name__ )
lowerCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase : Any = feat_extract.model_input_names[0]
lowerCamelCase : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
lowerCamelCase : List[str] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase : Any = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase : int = feat_extract.model_input_names[0]
lowerCamelCase : int = BatchFeature({input_name: speech_inputs} )
lowerCamelCase : List[str] = feat_extract.num_mel_bins # hack!
lowerCamelCase : Tuple = feat_extract.pad(__magic_name__ , padding="""longest""" , return_tensors="""np""" )[input_name]
lowerCamelCase : Any = feat_extract.pad(__magic_name__ , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = self.feat_extract_dict
lowerCamelCase : Any = True
lowerCamelCase : int = self.feature_extraction_class(**__magic_name__ )
lowerCamelCase : int = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase : List[str] = [len(__magic_name__ ) for x in speech_inputs]
lowerCamelCase : Tuple = feat_extract.model_input_names[0]
lowerCamelCase : str = BatchFeature({input_name: speech_inputs} )
lowerCamelCase : Optional[int] = feat_extract.num_mel_bins # hack!
lowerCamelCase : Tuple = feat_extract.pad(__magic_name__ , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.feat_extract_dict
lowerCamelCase : List[str] = True
lowerCamelCase : List[str] = self.feature_extraction_class(**__magic_name__ )
lowerCamelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase : List[Any] = [len(__magic_name__ ) for x in speech_inputs]
lowerCamelCase : List[str] = feat_extract.model_input_names[0]
lowerCamelCase : Optional[int] = BatchFeature({input_name: speech_inputs} )
lowerCamelCase : Any = min(__magic_name__ )
lowerCamelCase : Optional[int] = feat_extract.num_mel_bins # hack!
lowerCamelCase : str = feat_extract.pad(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def UpperCamelCase__ ( self , __magic_name__ ):
from datasets import load_dataset
lowerCamelCase : int = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
lowerCamelCase : Dict = ds.sort("""id""" ).select(range(__magic_name__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCamelCase__ ( self ):
# fmt: off
lowerCamelCase : str = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
lowerCamelCase : Optional[Any] = self._load_datasamples(1 )
lowerCamelCase : Tuple = SpeechTaFeatureExtractor()
lowerCamelCase : Optional[int] = feature_extractor(__magic_name__ , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , __magic_name__ , atol=1e-6 ) )
def UpperCamelCase__ ( self ):
# fmt: off
lowerCamelCase : Dict = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
lowerCamelCase : Tuple = self._load_datasamples(1 )
lowerCamelCase : int = SpeechTaFeatureExtractor()
lowerCamelCase : List[str] = feature_extractor(audio_target=__magic_name__ , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , __magic_name__ , atol=1e-4 ) )
| 681 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
])
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , )
assert hasattr(self , """env""" )
def UpperCamelCase__ ( self , __magic_name__ ):
# configuration for running training with smdistributed model parallelism
lowerCamelCase : Any = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCamelCase : Any = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
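# Topology note (assumption, for illustration): processes_per_host=8 with partitions=4
# and ddp=True gives 4-way model parallelism replicated twice with data parallelism on
# each 8-GPU ml.p3dn.24xlarge host.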
lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 5_0_0,
} , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , )
def UpperCamelCase__ ( self , __magic_name__ ):
TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def UpperCamelCase__ ( self , __magic_name__ ):
# create estimator
lowerCamelCase : int = self.create_estimator(__magic_name__ )
# run training
estimator.fit()
# result dataframe
lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from the SageMaker job; this includes starting, preprocessing and stopping
lowerCamelCase : int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
| 681 | 1 |
from __future__ import annotations
_lowerCamelCase =list[tuple[int, int]]
_lowerCamelCase =[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_lowerCamelCase =([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A__ :
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : List[Any] = pos_x
lowerCamelCase : Dict = pos_y
lowerCamelCase : str = (pos_y, pos_x)
lowerCamelCase : List[str] = goal_x
lowerCamelCase : List[Any] = goal_y
lowerCamelCase : Optional[Any] = g_cost
lowerCamelCase : str = parent
lowerCamelCase : Dict = self.calculate_heuristic()
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = abs(self.pos_x - self.goal_x )
lowerCamelCase : Any = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , __magic_name__ ):
return self.f_cost < other.f_cost
class A__ :
def __init__( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Union[str, Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __magic_name__ )
lowerCamelCase : Optional[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , __magic_name__ )
lowerCamelCase : Any = [self.start]
lowerCamelCase : list[Node] = []
lowerCamelCase : Optional[int] = False
def UpperCamelCase__ ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowerCamelCase : List[str] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
lowerCamelCase : List[str] = True
return self.retrace_path(__magic_name__ )
self.closed_nodes.append(__magic_name__ )
lowerCamelCase : Optional[int] = self.get_successors(__magic_name__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__magic_name__ )
else:
# retrieve the best current path
lowerCamelCase : Any = self.open_nodes.pop(self.open_nodes.index(__magic_name__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__magic_name__ )
else:
self.open_nodes.append(__magic_name__ )
if not self.reached:
return [self.start.pos]
return None
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Dict = []
for action in delta:
lowerCamelCase : str = parent.pos_x + action[1]
lowerCamelCase : Optional[int] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__magic_name__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__magic_name__ , __magic_name__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __magic_name__ , ) )
return successors
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Tuple = node
lowerCamelCase : Dict = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCamelCase : Any = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
_lowerCamelCase =(0, 0)
_lowerCamelCase =(len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
_lowerCamelCase =GreedyBestFirst(init, goal)
_lowerCamelCase =greedy_bf.search()
if path:
for pos_x, pos_y in path:
_lowerCamelCase =2
for elem in grid:
print(elem)
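# Heuristic note (not from the original): calculate_heuristic is the Manhattan distance
# |pos_x - goal_x| + |pos_y - goal_y|; from (0, 0) to this grid's goal (6, 6) it is 12.
# Greedy best-first always expands the open node with the smallest such f_cost.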
| 681 |
from __future__ import annotations
def _a ( lowerCamelCase ):
lowerCamelCase : Union[str, Any] = str(lowerCamelCase )
return n == n[::-1]
def _a ( lowerCamelCase = 100_0000 ):
lowerCamelCase : Any = 0
for i in range(1, lowerCamelCase ):
if is_palindrome(lowerCamelCase ) and is_palindrome(bin(lowerCamelCase ).split("""b""" )[1] ):
total += i
return total
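# Worked example (not from the original): 585 is a palindrome in base 10 and in base 2,
# since bin(585).split("b")[1] == "1001001001", so it is added to the running total.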
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 681 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase ={
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 681 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _a ( lowerCamelCase, lowerCamelCase=False ):
lowerCamelCase : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[Any] = """"""
else:
lowerCamelCase : Optional[int] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size]
lowerCamelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : Any = in_proj_bias[-config.hidden_size :]
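# Split note (assumption): the fused timm attention matrix has shape
# (3 * hidden_size, hidden_size); the slices above copy rows [0:H], [H:2H] and [2H:3H]
# into the separate HF query/key/value projections, and likewise for the bias.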
def _a ( lowerCamelCase ):
lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase ):
# The projection head is used in self-supervised pre-training in MSN;
# it is not needed for downstream tasks.
lowerCamelCase : Any = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Dict = dct.pop(lowerCamelCase )
lowerCamelCase : str = val
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Any = ViTMSNConfig()
lowerCamelCase : Tuple = 1000
lowerCamelCase : List[Any] = """datasets/huggingface/label-files"""
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase ), """r""" ) )
lowerCamelCase : List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase : Optional[int] = idalabel
lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCamelCase : int = 384
lowerCamelCase : Optional[int] = 1536
lowerCamelCase : Tuple = 6
elif "l16" in checkpoint_url:
lowerCamelCase : Dict = 1024
lowerCamelCase : List[Any] = 4096
lowerCamelCase : Optional[int] = 24
lowerCamelCase : str = 16
lowerCamelCase : str = 0.1
elif "b4" in checkpoint_url:
lowerCamelCase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowerCamelCase : Tuple = 7
lowerCamelCase : Optional[int] = 1024
lowerCamelCase : List[Any] = 4096
lowerCamelCase : Tuple = 24
lowerCamelCase : Dict = 16
lowerCamelCase : str = 0.1
lowerCamelCase : List[Any] = ViTMSNModel(lowerCamelCase )
lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase, map_location="""cpu""" )["""target_encoder"""]
lowerCamelCase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowerCamelCase )
lowerCamelCase : Dict = create_rename_keys(lowerCamelCase, base_model=lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase )
read_in_q_k_v(lowerCamelCase, lowerCamelCase, base_model=lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
lowerCamelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
lowerCamelCase : Union[str, Any] = ViTImageProcessor(
size=config.image_size, image_mean=lowerCamelCase, image_std=lowerCamelCase )
lowerCamelCase : Tuple = image_processor(images=lowerCamelCase, return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowerCamelCase : int = model(**lowerCamelCase )
lowerCamelCase : Union[str, Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCamelCase : Union[str, Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCamelCase : Tuple = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCamelCase : List[str] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCamelCase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCamelCase : List[str] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], lowerCamelCase, atol=1e-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCamelCase =parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 681 | 1 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[str] = k_size // 2
lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCamelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) )
return g
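# Formula note (not from the original): the textbook 2-D Gaussian is
# G(x, y) = 1 / (2 * pi * sigma**2) * exp(-(x**2 + y**2) / (2 * sigma**2)); the kernel
# above normalises by sigma rather than sigma**2, which only rescales the filtered
# output by a constant factor.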
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1]
# dst image height and width
lowerCamelCase : Dict = height - k_size + 1
lowerCamelCase : str = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
lowerCamelCase : List[Any] = 0
for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ):
lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] )
lowerCamelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase )
lowerCamelCase : str = ravel(lowerCamelCase )
# reshape and get the dst image
lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(lowerCamelCase )
return dst
if __name__ == "__main__":
# read original image
_lowerCamelCase =imread(R"""../image_data/lena.jpg""")
# turn image in gray scale value
_lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_lowerCamelCase =gaussian_filter(gray, 3, sigma=1)
_lowerCamelCase =gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("""gaussian filter with 3x3 mask""", gaussianaxa)
imshow("""gaussian filter with 5x5 mask""", gaussianaxa)
waitKey()
| 681 |
def _a ( lowerCamelCase ):
if num < 0:
return False
lowerCamelCase : int = num
lowerCamelCase : int = 0
while num > 0:
lowerCamelCase : str = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
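# Worked example (not from the original): for num = 121 the loop builds rev_num as
# 1 -> 12 -> 121, so the function returns True; for 123 it builds 321 and returns False.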
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
_lowerCamelCase =[
(1_0_0_0, """M"""),
(9_0_0, """CM"""),
(5_0_0, """D"""),
(4_0_0, """CD"""),
(1_0_0, """C"""),
(9_0, """XC"""),
(5_0, """L"""),
(4_0, """XL"""),
(1_0, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def _a ( lowerCamelCase ):
lowerCamelCase : Tuple = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
lowerCamelCase : Union[str, Any] = 0
lowerCamelCase : Dict = 0
while place < len(lowerCamelCase ):
if (place + 1 < len(lowerCamelCase )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def _a ( lowerCamelCase ):
lowerCamelCase : Any = []
for arabic, roman in ROMAN:
((lowerCamelCase) , (lowerCamelCase)) : List[str] = divmod(lowerCamelCase, lowerCamelCase )
result.append(roman * factor )
if number == 0:
break
return "".join(lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase ={
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 681 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_lowerCamelCase =logging.get_logger(__name__)
if is_vision_available():
import PIL
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : int = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = True , __magic_name__ = None , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : int = size if size is not None else {"""shortest_edge""": 2_2_4}
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ , param_name="""crop_size""" )
lowerCamelCase : Optional[Any] = do_resize
lowerCamelCase : Union[str, Any] = size
lowerCamelCase : Optional[int] = resample
lowerCamelCase : str = do_center_crop
lowerCamelCase : int = crop_size
lowerCamelCase : Dict = do_rescale
lowerCamelCase : Any = rescale_factor
lowerCamelCase : Any = do_normalize
lowerCamelCase : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase : Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase : List[str] = do_convert_rgb
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : int = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowerCamelCase : str = get_resize_output_image_size(__magic_name__ , size=size["""shortest_edge"""] , default_to_square=__magic_name__ )
return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : int = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__magic_name__ , size=(size["""height"""], size["""width"""]) , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : List[Any] = size if size is not None else self.size
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , param_name="""size""" , default_to_square=__magic_name__ )
lowerCamelCase : Dict = resample if resample is not None else self.resample
lowerCamelCase : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase : List[Any] = crop_size if crop_size is not None else self.crop_size
lowerCamelCase : Optional[Any] = get_size_dict(__magic_name__ , param_name="""crop_size""" , default_to_square=__magic_name__ )
lowerCamelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Dict = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Optional[Any] = image_std if image_std is not None else self.image_std
lowerCamelCase : Tuple = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase : Optional[Any] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase : List[str] = [convert_to_rgb(__magic_name__ ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase : List[str] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : Optional[int] = [self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_center_crop:
lowerCamelCase : Optional[int] = [self.center_crop(image=__magic_name__ , size=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : int = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : int = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
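# Processing-order note (assumption): the preprocessing above applies, in order,
# convert-to-RGB -> resize (shortest edge 224) -> center-crop (224x224) -> rescale
# (1/255) -> normalize (OpenAI CLIP mean/std) -> channels-first, returning a
# BatchFeature whose "pixel_values" holds the processed images.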
| 681 |
import copy
import random
from transformers import CLIPTokenizer
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , *__magic_name__ , **__magic_name__ ):
super().__init__(*__magic_name__ , **__magic_name__ )
lowerCamelCase : Dict = {}
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ):
lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
""" `placeholder_token` that is not already in the tokenizer.""" )
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ):
lowerCamelCase : List[Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
output.append(__magic_name__ )
else:
lowerCamelCase : Dict = []
for i in range(__magic_name__ ):
lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}'''
self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
output.append(__magic_name__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
F''' {placeholder_token}. Keep placeholder tokens independent.''' )
lowerCamelCase : Any = output
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ):
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[str] = []
for i in range(len(__magic_name__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowerCamelCase : List[str] = self.token_map[placeholder_token]
lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ )
random.shuffle(__magic_name__ )
lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) )
return text
def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
__magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
__magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
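# Usage sketch (assumption, not from the original): registering "<cat>" with
# num_vec_per_token=3 adds "<cat>_0 <cat>_1 <cat>_2" to the vocabulary, and
# replace_placeholder_tokens_in_text swaps "<cat>" for that space-joined sequence
# (optionally shuffled) before the normal CLIP tokenization runs.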
| 681 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Tuple = KandinskyInpaintPipeline
_UpperCAmelCase : Optional[Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_UpperCAmelCase : Optional[Any] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_UpperCAmelCase : Optional[Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_UpperCAmelCase : List[Any] = False
@property
def UpperCamelCase__ ( self ):
return 3_2
@property
def UpperCamelCase__ ( self ):
return 3_2
@property
def UpperCamelCase__ ( self ):
return self.time_input_dim
@property
def UpperCamelCase__ ( self ):
return self.time_input_dim * 4
@property
def UpperCamelCase__ ( self ):
return 1_0_0
@property
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
lowerCamelCase : str = MultilingualCLIP(__magic_name__ )
lowerCamelCase : List[str] = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = {
"""in_channels""": 9,
# Out channels is double the in channels because the model predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCamelCase : List[str] = UNetaDConditionModel(**__magic_name__ )
return model
@property
def UpperCamelCase__ ( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
lowerCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.dummy_text_encoder
lowerCamelCase : str = self.dummy_tokenizer
lowerCamelCase : Optional[int] = self.dummy_unet
lowerCamelCase : Dict = self.dummy_movq
lowerCamelCase : str = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__magic_name__ , )
lowerCamelCase : Optional[int] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=0 ):
lowerCamelCase : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
lowerCamelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ )
# create init_image
lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
lowerCamelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase : Any = Image.fromarray(np.uinta(__magic_name__ ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
# create mask
lowerCamelCase : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.floataa )
lowerCamelCase : List[Any] = 0
if str(__magic_name__ ).startswith("""mps""" ):
lowerCamelCase : Union[str, Any] = torch.manual_seed(__magic_name__ )
else:
lowerCamelCase : Union[str, Any] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
lowerCamelCase : List[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """cpu"""
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : Dict = self.pipeline_class(**__magic_name__ )
lowerCamelCase : int = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : Any = pipe(**self.get_dummy_inputs(__magic_name__ ) )
lowerCamelCase : Dict = output.images
lowerCamelCase : Any = pipe(
**self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0]
lowerCamelCase : str = image[0, -3:, -3:, -1]
lowerCamelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase : Union[str, Any] = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def UpperCamelCase__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowerCamelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowerCamelCase : List[Any] = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
lowerCamelCase : str = 0
lowerCamelCase : int = """a hat"""
lowerCamelCase : Dict = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__magic_name__ )
lowerCamelCase : Tuple = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
lowerCamelCase : Optional[Any] = pipeline.to(__magic_name__ )
pipeline.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCamelCase , lowerCamelCase : Optional[int] = pipe_prior(
__magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowerCamelCase : str = pipeline(
__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="""np""" , )
lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
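# Flow recap for the slow test above: the prior pipeline maps the prompt to
# CLIP image embeddings, .to_tuple() yields (image_embeds,
# negative_image_embeds), and the inpainting pipeline then repaints the masked
# region of the cat image conditioned on those embeddings and the prompt.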
| 681 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ):
lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Union[str, Any] = batch_size
lowerCamelCase : str = num_channels
lowerCamelCase : Any = image_size
lowerCamelCase : Optional[int] = min_resolution
lowerCamelCase : Union[str, Any] = max_resolution
lowerCamelCase : Union[str, Any] = do_resize
lowerCamelCase : int = size
lowerCamelCase : int = do_center_crop
lowerCamelCase : Union[str, Any] = crop_size
lowerCamelCase : Union[str, Any] = do_normalize
lowerCamelCase : Dict = image_mean
lowerCamelCase : Optional[Any] = image_std
lowerCamelCase : Union[str, Any] = do_convert_rgb
def UpperCamelCase__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowerCamelCase : Tuple = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
lowerCamelCase : Dict = []
for i in range(self.batch_size ):
lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs]
return image_inputs
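# Hedged usage sketch for prepare_inputs above (the `tester` variable is an
# assumption; the real call sites appear in the test classes below):
#   pil_inputs = tester.prepare_inputs(equal_resolution=False)               # list of PIL.Image
#   np_inputs = tester.prepare_inputs(equal_resolution=True, numpify=True)   # list of np.ndarray, CHW
#   pt_inputs = tester.prepare_inputs(equal_resolution=True, torchify=True)  # list of torch.Tensor, CHW
# Only the PIL branch moves the channel axis last (np.moveaxis) because PIL
# expects HWC data; the numpy and torch variants stay channels-first.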
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ )
lowerCamelCase : Any = 3
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
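# The 4-channel tester above still expects 3 channels in the encoded output
# (self.expected_encoded_image_num_channels is set to 3): with do_convert_rgb
# enabled, the processor converts the 4-channel input to RGB before cropping
# and normalizing, which is exactly what the shape assertions check.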
| 681 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__ ( enum.Enum):
_UpperCAmelCase : Any = 0
_UpperCAmelCase : Union[str, Any] = 1
_UpperCAmelCase : Optional[Any] = 2
@add_end_docstrings(__SCREAMING_SNAKE_CASE)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : int = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self , *__magic_name__ , **__magic_name__ ):
super().__init__(*__magic_name__ , **__magic_name__ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCamelCase : str = None
if self.model.config.prefix is not None:
lowerCamelCase : Optional[int] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCamelCase : Dict = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._sanitize_parameters(prefix=__magic_name__ , **self._forward_params )
lowerCamelCase : Any = {**self._preprocess_params, **preprocess_params}
lowerCamelCase : Optional[int] = {**self._forward_params, **forward_params}
def UpperCamelCase__ ( self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , **__magic_name__ , ):
lowerCamelCase : Dict = {}
if prefix is not None:
lowerCamelCase : Optional[Any] = prefix
if prefix:
lowerCamelCase : Tuple = self.tokenizer(
__magic_name__ , padding=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=self.framework )
lowerCamelCase : Dict = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
""" [None, 'hole']""" )
lowerCamelCase : int = handle_long_generation
preprocess_params.update(__magic_name__ )
lowerCamelCase : Union[str, Any] = generate_kwargs
lowerCamelCase : int = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" )
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" )
lowerCamelCase : Union[str, Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" )
lowerCamelCase : List[Any] = ReturnType.TENSORS
if return_type is not None:
lowerCamelCase : Optional[int] = return_type
if clean_up_tokenization_spaces is not None:
lowerCamelCase : Union[str, Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCamelCase : Union[str, Any] = self.tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
if len(__magic_name__ ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
lowerCamelCase : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True} )
return super()._parse_and_tokenize(*__magic_name__ , **__magic_name__ )
def __call__( self , __magic_name__ , **__magic_name__ ):
return super().__call__(__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__="" , __magic_name__=None , **__magic_name__ ):
lowerCamelCase : int = self.tokenizer(
prefix + prompt_text , padding=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=self.framework )
lowerCamelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
lowerCamelCase : Optional[int] = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCamelCase : Optional[Any] = generate_kwargs["""max_new_tokens"""]
else:
lowerCamelCase : Tuple = generate_kwargs.get("""max_length""" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCamelCase : Union[str, Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""" )
lowerCamelCase : int = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCamelCase : Dict = inputs["""attention_mask"""][:, -keep_length:]
return inputs
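# Worked example of the "hole" truncation above (numbers are illustrative):
# with cur_len = 1000, max_new_tokens = 50 and tokenizer.model_max_length = 1024,
# keep_length = 1024 - 50 = 974, so only the last 974 prompt tokens (and the
# matching attention-mask slice) are kept and all 50 new tokens still fit.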
def UpperCamelCase__ ( self , __magic_name__ , **__magic_name__ ):
lowerCamelCase : Union[str, Any] = model_inputs["""input_ids"""]
lowerCamelCase : Optional[int] = model_inputs.get("""attention_mask""" , __magic_name__ )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCamelCase : Optional[int] = None
lowerCamelCase : List[Any] = None
lowerCamelCase : List[str] = 1
else:
lowerCamelCase : Optional[Any] = input_ids.shape[0]
lowerCamelCase : int = model_inputs.pop("""prompt_text""" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCamelCase : int = generate_kwargs.pop("""prefix_length""" , 0 )
if prefix_length > 0:
lowerCamelCase : str = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCamelCase : str = generate_kwargs.get("""max_length""" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCamelCase : Any = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCamelCase : Tuple = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ , **__magic_name__ )
lowerCamelCase : Any = generated_sequence.shape[0]
if self.framework == "pt":
lowerCamelCase : Union[str, Any] = generated_sequence.reshape(__magic_name__ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
lowerCamelCase : List[Any] = tf.reshape(__magic_name__ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=ReturnType.FULL_TEXT , __magic_name__=True ):
lowerCamelCase : Optional[Any] = model_outputs["""generated_sequence"""][0]
lowerCamelCase : Dict = model_outputs["""input_ids"""]
lowerCamelCase : List[Any] = model_outputs["""prompt_text"""]
lowerCamelCase : List[Any] = generated_sequence.numpy().tolist()
lowerCamelCase : int = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCamelCase : Union[str, Any] = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCamelCase : List[Any] = self.tokenizer.decode(
__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , )
# Remove the padding/prefix prompt from the decoded sequence if an XLNet or Transfo-XL model is used
if input_ids is None:
lowerCamelCase : Tuple = 0
else:
lowerCamelCase : Optional[int] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , ) )
if return_type == ReturnType.FULL_TEXT:
lowerCamelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
lowerCamelCase : Dict = text[prompt_length:]
lowerCamelCase : Union[str, Any] = {"""generated_text""": all_text}
records.append(__magic_name__ )
return records
| 681 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ):
lowerCamelCase : Tuple = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : List[Any] = image_size
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : Dict = embeddings_size
lowerCamelCase : Optional[int] = hidden_sizes
lowerCamelCase : Union[str, Any] = depths
lowerCamelCase : Optional[Any] = is_training
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Any = num_labels
lowerCamelCase : int = scope
lowerCamelCase : Optional[Any] = len(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ )
lowerCamelCase : Tuple = model(__magic_name__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
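# With the tester defaults above (image_size=32), the expected spatial size is
# 32 // 32 = 1, i.e. a last hidden state of shape
# (batch_size, hidden_sizes[-1], 1, 1).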
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = self.num_labels
lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ )
lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs
lowerCamelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase : List[str] = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Any = False
def UpperCamelCase__ ( self ):
lowerCamelCase : int = TFResNetModelTester(self )
lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def UpperCamelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__magic_name__ )
lowerCamelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Tuple = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = model_class(__magic_name__ )
lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Tuple = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase : Union[str, Any] = layer_type
lowerCamelCase : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : int = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ):
lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCamelCase : List[str] = self.default_image_processor
lowerCamelCase : str = prepare_img()
lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" )
# forward pass
lowerCamelCase : Tuple = model(**__magic_name__ )
# verify the logits
lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
| 681 | 1 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__magic_name__ ):
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
lowerCamelCase : str = FlaxAutoModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__magic_name__ ):
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
lowerCamelCase : Any = FlaxAutoModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowerCamelCase : int = AutoTokenizer.from_pretrained(__magic_name__ )
lowerCamelCase : int = FlaxBertModel.from_pretrained(__magic_name__ )
lowerCamelCase : Any = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__magic_name__ ):
return model(**__magic_name__ )
eval(**__magic_name__ ).block_until_ready()
@slow
def UpperCamelCase__ ( self ):
for model_name in ["roberta-base", "roberta-large"]:
lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(__magic_name__ )
lowerCamelCase : Optional[int] = FlaxRobertaModel.from_pretrained(__magic_name__ )
lowerCamelCase : Any = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__magic_name__ ):
return model(**__magic_name__ )
eval(**__magic_name__ ).block_until_ready()
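# block_until_ready() matters in the two jitted checks above: JAX dispatches
# computations asynchronously, so without it the test could return before the
# compiled forward pass has actually executed.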
def UpperCamelCase__ ( self ):
with self.assertRaisesRegex(
__magic_name__ , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCamelCase : Any = FlaxAutoModel.from_pretrained("""bert-base""" )
def UpperCamelCase__ ( self ):
with self.assertRaisesRegex(
__magic_name__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCamelCase : Union[str, Any] = FlaxAutoModel.from_pretrained(__magic_name__ , revision="""aaaaaa""" )
def UpperCamelCase__ ( self ):
with self.assertRaisesRegex(
__magic_name__ , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ):
lowerCamelCase : List[str] = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCamelCase__ ( self ):
with self.assertRaisesRegex(__magic_name__ , """Use `from_pt=True` to load this model""" ):
lowerCamelCase : str = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
| 681 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# Initialise PyTorch model
lowerCamelCase : str = MobileBertConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase : Tuple = MobileBertForPreTraining(lowerCamelCase )
# Load weights from tf checkpoint
lowerCamelCase : Tuple = load_tf_weights_in_mobilebert(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), lowerCamelCase )
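# Example invocation (script name and paths are hypothetical):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_mobilebert.bin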
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCamelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 681 | 1 |
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[Any] = len(lowerCamelCase )
lowerCamelCase : Optional[Any] = sum(lowerCamelCase )
lowerCamelCase : int = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1, n + 1 ):
lowerCamelCase : List[str] = True
for i in range(1, s + 1 ):
lowerCamelCase : str = False
for i in range(1, n + 1 ):
for j in range(1, s + 1 ):
# dp[i][j]: a sum j is reachable using the first i items if it was reachable
# without item i, or if j - arr[i - 1] was reachable without item i
lowerCamelCase : List[Any] = dp[i - 1][j]
if arr[i - 1] <= j:
lowerCamelCase : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ), -1, -1 ):
if dp[n][j]:
lowerCamelCase : Optional[int] = s - 2 * j
break
return diff
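# Worked example for the function above: with arr = [1, 6, 11, 5], s = 23 and
# the largest reachable half-sum j <= s // 2 is 11 (e.g. {6, 5}), so the
# minimum partition difference returned is 23 - 2 * 11 = 1.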
| 681 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _a ( lowerCamelCase ):
# vision encoder
if "img_encoder.pos_embed" in name:
lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" )
if "img_encoder.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" )
if "ln_1" in name:
lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" )
if "ln_2" in name:
lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" )
if "c_fc" in name:
lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" )
if "c_proj" in name:
lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" )
if "text_encoder" in name:
lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" )
if "ln_final" in name:
lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" )
if "img_projector.linear_out." in name:
lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" )
if "text_projector.linear_out" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" )
return name
def _a ( lowerCamelCase, lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : Any = key.split(""".""" )
lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] )
lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase : int = val[:dim, :]
lowerCamelCase : List[str] = val[dim : dim * 2, :]
lowerCamelCase : Dict = val[-dim:, :]
else:
lowerCamelCase : List[Any] = val[:dim]
lowerCamelCase : List[Any] = val[dim : dim * 2]
lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : str = key.split(""".""" )
lowerCamelCase : Optional[int] = int(key_split[3] )
lowerCamelCase : List[str] = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase : Optional[int] = val[:dim, :]
lowerCamelCase : Any = val[
dim : dim * 2, :
]
lowerCamelCase : Optional[Any] = val[-dim:, :]
else:
lowerCamelCase : Union[str, Any] = val[:dim]
lowerCamelCase : Optional[int] = val[dim : dim * 2]
lowerCamelCase : Union[str, Any] = val[-dim:]
else:
lowerCamelCase : List[Any] = rename_key(lowerCamelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase : Any = val.squeeze_()
else:
lowerCamelCase : Union[str, Any] = val
return orig_state_dict
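# Shape sketch for the fused-projection split above: a packed qkv weight of
# shape (3 * dim, dim) is cut row-wise into the query block val[:dim, :], the
# key block val[dim : dim * 2, :] and the value block val[-dim:, :]; the
# packed (3 * dim,) bias vector is sliced the same way.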
def _a ( ):
lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ):
lowerCamelCase : int = GroupViTConfig()
lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval()
lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""]
lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase )
lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0)
# verify result
lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowerCamelCase : int = prepare_img()
lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" )
with torch.no_grad():
lowerCamelCase : int = model(**lowerCamelCase )
if model_name == "groupvit-gcc-yfcc":
lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 )
processor.save_pretrained(lowerCamelCase )
model.save_pretrained(lowerCamelCase )
print("""Successfully saved processor and model to""", lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCamelCase, organization="""nielsr""" )
model.push_to_hub(lowerCamelCase, organization="""nielsr""" )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_lowerCamelCase =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 681 | 1 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase ="""▁"""
_lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : str = BertGenerationTokenizer
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : List[Any] = True
def UpperCamelCase__ ( self ):
super().setUp()
lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """<s>"""
lowerCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(__magic_name__ ) , 1_0_0_2 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCamelCase__ ( self ):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """Hello World!"""
lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : str = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCamelCase : str = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@require_torch
@slow
def UpperCamelCase__ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowerCamelCase : Dict = """ """.join(__magic_name__ )
lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : Tuple = BertGenerationConfig()
lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__magic_name__ )
model(**__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
# fmt: off
lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 681 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class A__ :
# setable values
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def UpperCamelCase__ ( cls ):
return cls()
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : KarrasVeSchedulerState
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
return True
@register_to_config
def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ):
pass
def UpperCamelCase__ ( self ):
return KarrasVeSchedulerState.create()
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ):
lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy()
lowerCamelCase : int = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase : Dict = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 )
lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape )
lowerCamelCase : List[Any] = sigma + gamma * sigma
lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output
lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : str = sample_prev + sigma_prev * model_output
lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
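# The two updates above follow the stochastic sampler of Karras et al. (2022):
# the first is the first-order (Euler) step,
#   sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative,
#   derivative = (sample_hat - pred_original_sample) / sigma_hat,
# and the second applies the second-order (Heun) correction that averages the
# derivatives evaluated at sigma_hat and sigma_prev.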
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
raise NotImplementedError()
| 681 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = """naver-clova-ix/donut-base-finetuned-docvqa"""
_UpperCAmelCase : Any = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
_UpperCAmelCase : str = """document_qa"""
_UpperCAmelCase : Optional[Any] = AutoProcessor
_UpperCAmelCase : List[Any] = VisionEncoderDecoderModel
_UpperCAmelCase : Optional[int] = ["""image""", """text"""]
_UpperCAmelCase : Union[str, Any] = ["""text"""]
def __init__( self , *__magic_name__ , **__magic_name__ ):
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Tuple = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowerCamelCase : Union[str, Any] = task_prompt.replace("""{user_input}""" , __magic_name__ )
lowerCamelCase : Dict = self.pre_processor.tokenizer(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors="""pt""" ).input_ids
lowerCamelCase : List[Any] = self.pre_processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def UpperCamelCase__ ( self , __magic_name__ ):
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__magic_name__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__magic_name__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__magic_name__ , ).sequences
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Optional[Any] = self.pre_processor.batch_decode(__magic_name__ )[0]
lowerCamelCase : Optional[int] = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
lowerCamelCase : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
lowerCamelCase : str = re.sub(r"""<.*?>""" , """""" , __magic_name__ , count=1 ).strip() # remove first task start token
lowerCamelCase : str = self.pre_processor.token2json(__magic_name__ )
return sequence["answer"]
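# Hypothetical usage sketch (file name and construction are assumptions; the
# error message above suggests the class is DocumentQuestionAnsweringTool):
#
#   from PIL import Image
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=Image.open("invoice.png"), question="What is the total?")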
| 681 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[str] = k_size // 2
lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCamelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) )
return g
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1]
# dst image height and width
lowerCamelCase : Dict = height - k_size + 1
lowerCamelCase : str = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
lowerCamelCase : List[Any] = 0
for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ):
lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] )
lowerCamelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase )
lowerCamelCase : str = ravel(lowerCamelCase )
# reshape and get the dst image
lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(uint8 )
return dst
if __name__ == "__main__":
# read original image
_lowerCamelCase =imread(R"""../image_data/lena.jpg""")
# turn image in gray scale value
_lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_lowerCamelCase =gaussian_filter(gray, 3, sigma=1)
_lowerCamelCase =gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("""gaussian filter with 3x3 mask""", gaussianaxa)
imshow("""gaussian filter with 5x5 mask""", gaussianaxa)
waitKey()
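# Minimal self-contained check of the im2col idea used above (4x4 input and a
# 3x3 box filter are assumptions chosen for illustration):
import numpy as np

img = np.arange(16, dtype=float).reshape(4, 4)
k = 3
rows = [img[i : i + k, j : j + k].ravel() for i in range(2) for j in range(2)]
patches = np.vstack(rows)  # shape (4, 9): one flattened row per 3x3 window
kernel = np.ones((k, k)) / (k * k)  # a box filter stands in for the Gaussian
out = patches @ kernel.ravel()  # one filtered value per window
print(out.reshape(2, 2))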
| 681 | 1 |
import comet # From: unbabel-comet
import torch
import datasets
_lowerCamelCase =datasets.logging.get_logger(__name__)
_lowerCamelCase ="""\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
_lowerCamelCase ="""\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
_lowerCamelCase ="""
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
def UpperCamelCase__ ( self , __magic_name__ ):
if self.config_name == "default":
lowerCamelCase : Optional[Any] = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
else:
lowerCamelCase : List[Any] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=False ):
if gpus is None:
lowerCamelCase : str = 1 if torch.cuda.is_available() else 0
lowerCamelCase : Tuple = {"""src""": sources, """mt""": predictions, """ref""": references}
lowerCamelCase : Union[str, Any] = [dict(zip(__magic_name__ , __magic_name__ ) ) for t in zip(*data.values() )]
lowerCamelCase , lowerCamelCase : List[str] = self.scorer.predict(__magic_name__ , gpus=__magic_name__ , progress_bar=__magic_name__ )
return {"mean_score": mean_score, "scores": scores}
| 681 |
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = dataset_loading_script_name
lowerCamelCase : Dict = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase )
lowerCamelCase : str = script_dir / F'''{script_name}.py'''
with open(lowerCamelCase, """w""" ) as f:
f.write(lowerCamelCase )
return str(lowerCamelCase )
| 681 | 1 |
from __future__ import annotations
def _a ( lowerCamelCase, lowerCamelCase ):
if len(lowerCamelCase ) < k or k < 0:
raise ValueError("""Invalid Input""" )
lowerCamelCase : str = sum(array[:k] )
for i in range(len(lowerCamelCase ) - k ):
lowerCamelCase : Optional[int] = current_sum - array[i] + array[i + k]
lowerCamelCase : Optional[Any] = max(lowerCamelCase, lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
_lowerCamelCase =[randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)]
_lowerCamelCase =randint(0, 1_1_0)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
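# Reference sketch of the sliding-window recurrence computed above (names here
# are illustrative, not taken from the function):
def max_sum_sliding_window(array, k):
    window = sum(array[:k])
    best = window
    for i in range(len(array) - k):
        window = window - array[i] + array[i + k]  # slide the window right by one
        best = max(best, window)
    return best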
| 681 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
_lowerCamelCase ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
_lowerCamelCase ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[Any] = (images / 2 + 0.5).clamp(0, 1 )
lowerCamelCase : Optional[Any] = images.cpu().permute(0, 2, 3, 1 ).float().numpy()
lowerCamelCase : Any = numpy_to_pil(lowerCamelCase )
return images
def _a ( lowerCamelCase ):
if images.ndim == 3:
lowerCamelCase : Optional[Any] = images[None, ...]
lowerCamelCase : List[Any] = (images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCamelCase : Optional[int] = [Image.fromarray(image.squeeze(), mode="""L""" ) for image in images]
else:
lowerCamelCase : int = [Image.fromarray(lowerCamelCase ) for image in images]
return pil_images
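# Quick sanity sketch for the conversion above (batch shape is an assumption):
# two 8x8 RGB arrays in [0, 1] become a list of two PIL images.
import numpy as np
from PIL import Image

batch = np.random.rand(2, 8, 8, 3)
imgs = [Image.fromarray((im * 255).round().astype("uint8")) for im in batch]
assert len(imgs) == 2 and imgs[0].size == (8, 8)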
| 681 | 1 |
from __future__ import annotations
import math
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(lowerCamelCase ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1, node_index * 2, lowerCamelCase, lowerCamelCase, lowerCamelCase ), minimax(depth + 1, node_index * 2 + 1, lowerCamelCase, lowerCamelCase, lowerCamelCase ), )
return min(
minimax(depth + 1, node_index * 2, lowerCamelCase, lowerCamelCase, lowerCamelCase ), minimax(depth + 1, node_index * 2 + 1, lowerCamelCase, lowerCamelCase, lowerCamelCase ), )
def _a ( ):
lowerCamelCase : List[Any] = [90, 23, 6, 33, 21, 65, 123, 3_4423]
lowerCamelCase : Dict = math.log(len(lowerCamelCase ), 2 )
print("""Optimal value : """, end="""""" )
print(minimax(0, 0, lowerCamelCase, lowerCamelCase, lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
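# Worked illustration of the recursion above (assumed 4-leaf tree): with
# scores [3, 5, 2, 9] and height log2(4) = 2, the maximizing root picks
# max(min(3, 5), min(2, 9)) = max(3, 2) = 3.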
| 681 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class A__ ( nn.Module):
def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ):
super().__init__()
lowerCamelCase : Any = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCamelCase : Any = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCamelCase : List[Any] = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCamelCase : Optional[int] = [1, 0]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ):
lowerCamelCase : List[Any] = hidden_states
lowerCamelCase : Dict = []
lowerCamelCase : List[Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i]
lowerCamelCase : List[Any] = self.transformers[transformer_index](
__magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCamelCase : Dict = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__magic_name__ )
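# Note on the blend above: encoded_states holds each transformer's residual
# (its output minus the shared input), so with mix_ratio = 0.5 the forward
# pass returns the average of the two residuals added back onto input_states.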
| 681 | 1 |
def _a ( lowerCamelCase = 10, lowerCamelCase = 22 ):
lowerCamelCase : List[Any] = range(1, lowerCamelCase )
lowerCamelCase : Optional[int] = range(1, lowerCamelCase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
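# Worked illustration of the count above: 9**3 = 729 is a 3-digit third power,
# so it qualifies, while 10**2 = 100 has 3 digits and 2 != 3, so it does not.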
| 681 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase ="""▁"""
_lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : str = BertGenerationTokenizer
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : List[Any] = True
def UpperCamelCase__ ( self ):
super().setUp()
lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """<s>"""
lowerCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(__magic_name__ ) , 1_0_0_2 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCamelCase__ ( self ):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """Hello World!"""
lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : str = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCamelCase : str = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@require_torch
@slow
def UpperCamelCase__ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowerCamelCase : Dict = """ """.join(__magic_name__ )
lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : Tuple = BertGenerationConfig()
lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__magic_name__ )
model(**__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
# fmt: off
lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 681 | 1 |
from functools import reduce
_lowerCamelCase =(
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def _a ( lowerCamelCase = N ):
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCamelCase, lowerCamelCase : str(int(lowerCamelCase ) * int(lowerCamelCase ) ), n[i : i + 13] ) )
for i in range(len(lowerCamelCase ) - 12 ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
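# Worked illustration with a shorter assumed string and window: the code above
# multiplies 13 adjacent digits; over "123456" with a window of 3 the products
# would be 1*2*3 = 6, 2*3*4 = 24, 3*4*5 = 60 and 4*5*6 = 120, so the maximum
# is 120.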
| 681 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
_lowerCamelCase =HfArgumentParser(InitializationArguments)
_lowerCamelCase =parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
_lowerCamelCase =AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_lowerCamelCase ={
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
_lowerCamelCase =AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
_lowerCamelCase =AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 681 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[Any] = """transfo-xl"""
_UpperCAmelCase : int = ["""mems"""]
_UpperCAmelCase : Any = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __magic_name__=2_6_7_7_3_5 , __magic_name__=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0] , __magic_name__=1_0_2_4 , __magic_name__=1_0_2_4 , __magic_name__=1_6 , __magic_name__=6_4 , __magic_name__=4_0_9_6 , __magic_name__=4 , __magic_name__=False , __magic_name__=1_8 , __magic_name__=1_6_0_0 , __magic_name__=1_0_0_0 , __magic_name__=True , __magic_name__=True , __magic_name__=0 , __magic_name__=-1 , __magic_name__=True , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=True , __magic_name__="normal" , __magic_name__=0.01 , __magic_name__=0.01 , __magic_name__=0.02 , __magic_name__=1e-5 , __magic_name__=0 , **__magic_name__ , ):
lowerCamelCase : Any = vocab_size
lowerCamelCase : int = []
self.cutoffs.extend(__magic_name__ )
if proj_share_all_but_first:
lowerCamelCase : Tuple = [False] + [True] * len(self.cutoffs )
else:
lowerCamelCase : List[Any] = [False] + [False] * len(self.cutoffs )
lowerCamelCase : List[str] = d_model
lowerCamelCase : int = d_embed
lowerCamelCase : Tuple = d_head
lowerCamelCase : Dict = d_inner
lowerCamelCase : Optional[int] = div_val
lowerCamelCase : List[str] = pre_lnorm
lowerCamelCase : List[str] = n_layer
lowerCamelCase : Optional[int] = n_head
lowerCamelCase : Optional[int] = mem_len
lowerCamelCase : Union[str, Any] = same_length
lowerCamelCase : Union[str, Any] = attn_type
lowerCamelCase : List[Any] = clamp_len
lowerCamelCase : Tuple = sample_softmax
lowerCamelCase : int = adaptive
lowerCamelCase : Optional[Any] = dropout
lowerCamelCase : Dict = dropatt
lowerCamelCase : List[Any] = untie_r
lowerCamelCase : Optional[Any] = init
lowerCamelCase : List[Any] = init_range
lowerCamelCase : str = proj_init_std
lowerCamelCase : Optional[Any] = init_std
lowerCamelCase : List[Any] = layer_norm_epsilon
super().__init__(eos_token_id=__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
# Message copied from Transformer-XL documentation
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def UpperCamelCase__ ( self , __magic_name__ ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 681 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self , __magic_name__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """sshleifer/tiny-gpt2"""
lowerCamelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = """sgugger/tiny-distilbert-classification"""
lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , )
lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """sshleifer/tiny-gpt2"""
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """sshleifer/tiny-gpt2"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] )
lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , )
lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ )
benchmark.run()
self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(__magic_name__ ):
self.assertTrue(hasattr(__magic_name__ , """sequential""" ) )
self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) )
self.assertTrue(hasattr(__magic_name__ , """current""" ) )
self.assertTrue(hasattr(__magic_name__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
| 681 | 1 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class A__ :
def __init__( self , __magic_name__ ):
lowerCamelCase : Tuple = data
lowerCamelCase : Optional[int] = [0X67452301, 0XEFCDAB89, 0X98BADCFE, 0X10325476, 0XC3D2E1F0]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
return ((n << b) | (n >> (3_2 - b))) & 0XFFFFFFFF
def UpperCamelCase__ ( self ):
lowerCamelCase : str = b"""\x80""" + b"""\x00""" * (6_3 - (len(self.data ) + 8) % 6_4)
lowerCamelCase : int = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def UpperCamelCase__ ( self ):
return [
self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 )
]
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Union[str, Any] = list(struct.unpack(""">16L""" , __magic_name__ ) ) + [0] * 6_4
for i in range(1_6 , 8_0 ):
lowerCamelCase : str = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 )
return w
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.padding()
lowerCamelCase : str = self.split_blocks()
for block in self.blocks:
lowerCamelCase : List[Any] = self.expand_block(__magic_name__ )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = self.h
for i in range(0 , 8_0 ):
if 0 <= i < 2_0:
lowerCamelCase : Union[str, Any] = (b & c) | ((~b) & d)
lowerCamelCase : int = 0X5A827999
elif 2_0 <= i < 4_0:
lowerCamelCase : Any = b ^ c ^ d
lowerCamelCase : Optional[Any] = 0X6ED9EBA1
elif 4_0 <= i < 6_0:
lowerCamelCase : Tuple = (b & c) | (b & d) | (c & d)
lowerCamelCase : List[Any] = 0X8F1BBCDC
elif 6_0 <= i < 8_0:
lowerCamelCase : List[str] = b ^ c ^ d
lowerCamelCase : int = 0XCA62C1D6
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = (
self.rotate(__magic_name__ , 5 ) + f + e + k + expanded_block[i] & 0XFFFFFFFF,
a,
self.rotate(__magic_name__ , 3_0 ),
c,
d,
)
lowerCamelCase : List[Any] = (
self.h[0] + a & 0XFFFFFFFF,
self.h[1] + b & 0XFFFFFFFF,
self.h[2] + c & 0XFFFFFFFF,
self.h[3] + d & 0XFFFFFFFF,
self.h[4] + e & 0XFFFFFFFF,
)
return ("{:08x}" * 5).format(*self.h )
def _a ( ):
lowerCamelCase : Tuple = B"""Test String"""
assert SHAaHash(lowerCamelCase ).final_hash() == hashlib.sha1(lowerCamelCase ).hexdigest() # noqa: S324
def _a ( ):
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""", dest="""input_string""", default="""Hello World!! Welcome to Cryptography""", help="""Hash the string""", )
parser.add_argument("""--file""", dest="""input_file""", help="""Hash contents of a file""" )
lowerCamelCase : int = parser.parse_args()
lowerCamelCase : Optional[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file, """rb""" ) as f:
lowerCamelCase : Optional[int] = f.read()
else:
lowerCamelCase : Optional[int] = bytes(lowerCamelCase, """utf-8""" )
print(SHAaHash(lowerCamelCase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
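# Quick check sketch (input bytes are an assumption): for any payload the
# class above should agree with the standard library, e.g.
#   SHAaHash(b"abc").final_hash() == hashlib.sha1(b"abc").hexdigest()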
| 681 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def _a ( lowerCamelCase ):
return x + 2
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """x = 3"""
lowerCamelCase : Tuple = {}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
lowerCamelCase : Optional[int] = """x = y"""
lowerCamelCase : Tuple = {"""y""": 5}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """y = add_two(x)"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """x = 3"""
lowerCamelCase : Dict = {}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """x = 3\ny = 5"""
lowerCamelCase : Optional[int] = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """text = f'This is x: {x}.'"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5"""
lowerCamelCase : Tuple = {"""x""": 3}
lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} )
lowerCamelCase : Tuple = {"""x""": 8}
lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = """test_list = [x, add_two(x)]"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """y = x"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]"""
lowerCamelCase : Any = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
lowerCamelCase : Dict = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i"""
lowerCamelCase : int = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
| 681 | 1 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ="""▁"""
_lowerCamelCase ={"""vocab_file""": """prophetnet.tokenizer"""}
_lowerCamelCase ={
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
_lowerCamelCase ={
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
_lowerCamelCase ={
"""microsoft/xprophetnet-large-wiki100-cased""": 5_1_2,
}
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[int] = collections.OrderedDict()
with open(lowerCamelCase, """r""", encoding="""utf-8""" ) as reader:
lowerCamelCase : str = reader.readlines()
for index, token in enumerate(lowerCamelCase ):
lowerCamelCase : Optional[int] = token.rstrip("""\n""" )
lowerCamelCase : Tuple = index
return vocab
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , __magic_name__ , __magic_name__="[SEP]" , __magic_name__="[SEP]" , __magic_name__="[SEP]" , __magic_name__="[UNK]" , __magic_name__="[PAD]" , __magic_name__="[CLS]" , __magic_name__="[MASK]" , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
lowerCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__magic_name__ ) )
lowerCamelCase : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCamelCase : List[str] = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4}
for i in range(1_0 ):
lowerCamelCase : List[str] = F'''[unused{i}]'''
lowerCamelCase : int = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowerCamelCase : Dict = 1_2
lowerCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(__magic_name__ )
def __getstate__( self ):
lowerCamelCase : str = self.__dict__.copy()
lowerCamelCase : Any = None
return state
def __setstate__( self , __magic_name__ ):
lowerCamelCase : List[Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase : int = {}
lowerCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
if token_ids_a is None:
return ([0] * len(__magic_name__ )) + [1]
return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : List[Any] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase__ ( self ):
return len(self.sp_model ) + self.fairseq_offset
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self , __magic_name__ ):
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase : Optional[int] = self.sp_model.PieceToId(__magic_name__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase__ ( self , __magic_name__ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : int = """""".join(__magic_name__ ).replace(__magic_name__ , """ """ ).strip()
return out_string
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
if not os.path.isdir(__magic_name__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : List[Any] = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , """wb""" ) as fi:
lowerCamelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowerCamelCase : List[str] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 681 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = """decision_transformer"""
_UpperCAmelCase : str = ["""past_key_values"""]
_UpperCAmelCase : Any = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __magic_name__=1_7 , __magic_name__=4 , __magic_name__=1_2_8 , __magic_name__=4_0_9_6 , __magic_name__=True , __magic_name__=1 , __magic_name__=1_0_2_4 , __magic_name__=3 , __magic_name__=1 , __magic_name__=None , __magic_name__="relu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , __magic_name__=False , __magic_name__=False , **__magic_name__ , ):
lowerCamelCase : Optional[int] = state_dim
lowerCamelCase : int = act_dim
lowerCamelCase : int = hidden_size
lowerCamelCase : Union[str, Any] = max_ep_len
lowerCamelCase : Optional[int] = action_tanh
lowerCamelCase : Any = vocab_size
lowerCamelCase : List[str] = n_positions
lowerCamelCase : List[Any] = n_layer
lowerCamelCase : Dict = n_head
lowerCamelCase : Optional[Any] = n_inner
lowerCamelCase : Tuple = activation_function
lowerCamelCase : Tuple = resid_pdrop
lowerCamelCase : str = embd_pdrop
lowerCamelCase : Dict = attn_pdrop
lowerCamelCase : Tuple = layer_norm_epsilon
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Tuple = scale_attn_weights
lowerCamelCase : str = use_cache
lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx
lowerCamelCase : List[str] = reorder_and_upcast_attn
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : str = eos_token_id
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
| 681 | 1 |
import random
from .binary_exp_mod import bin_exp_mod
def _a ( lowerCamelCase, lowerCamelCase=1000 ):
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowerCamelCase : List[str] = n - 1
lowerCamelCase : List[Any] = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
lowerCamelCase : str = 0
while count < prec:
lowerCamelCase : str = random.randint(2, n - 1 )
lowerCamelCase : Dict = bin_exp_mod(lowerCamelCase, lowerCamelCase, lowerCamelCase )
if b != 1:
lowerCamelCase : Any = True
for _ in range(lowerCamelCase ):
if b == n - 1:
lowerCamelCase : Optional[int] = False
break
lowerCamelCase : Optional[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_lowerCamelCase =abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 681 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowerCamelCase =logging.get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = question_encoder
lowerCamelCase : Dict = generator
lowerCamelCase : Tuple = self.question_encoder
def UpperCamelCase__ ( self , __magic_name__ ):
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" )
lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(__magic_name__ )
self.generator.save_pretrained(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ )
if config is None:
lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ )
lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
lowerCamelCase : Any = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=__magic_name__ , generator=__magic_name__ )
def __call__( self , *__magic_name__ , **__magic_name__ ):
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.question_encoder
def UpperCamelCase__ ( self ):
lowerCamelCase : str = self.generator
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , __magic_name__ , )
if max_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : int = self(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : Dict = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
lowerCamelCase : List[Any] = labels["""input_ids"""]
return model_inputs
| 681 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowerCamelCase =1_6
_lowerCamelCase =3_2
def _a ( lowerCamelCase, lowerCamelCase = 16, lowerCamelCase = "bert-base-cased" ):
lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase )
lowerCamelCase : Any = load_dataset("""glue""", """mrpc""" )
def tokenize_function(lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase : Tuple = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=lowerCamelCase, max_length=lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase : Union[str, Any] = datasets.map(
lowerCamelCase, batched=lowerCamelCase, remove_columns=["""idx""", """sentence1""", """sentence2"""], load_from_cache_file=lowerCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase : Dict = tokenized_datasets.rename_column("""label""", """labels""" )
def collate_fn(lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase, padding="""max_length""", max_length=128, return_tensors="""pt""" )
return tokenizer.pad(lowerCamelCase, padding="""longest""", return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCamelCase : List[str] = DataLoader(
tokenized_datasets["""train"""], shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=lowerCamelCase )
lowerCamelCase : Tuple = DataLoader(
tokenized_datasets["""validation"""], shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=lowerCamelCase )
return train_dataloader, eval_dataloader
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
model.eval()
lowerCamelCase : List[Any] = 0
for step, batch in enumerate(lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase : str = model(**lowerCamelCase )
lowerCamelCase : Optional[int] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once than multiple times
lowerCamelCase , lowerCamelCase : str = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowerCamelCase ) - 1:
lowerCamelCase : Dict = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCamelCase : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowerCamelCase, references=lowerCamelCase, )
lowerCamelCase : Dict = metric.compute()
return eval_metric["accuracy"]
def _a ( lowerCamelCase, lowerCamelCase ):
# Initialize accelerator
lowerCamelCase : str = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase : int = config["""lr"""]
lowerCamelCase : List[Any] = int(config["""num_epochs"""] )
lowerCamelCase : Optional[int] = int(config["""seed"""] )
lowerCamelCase : str = int(config["""batch_size"""] )
lowerCamelCase : Any = args.model_name_or_path
set_seed(lowerCamelCase )
lowerCamelCase , lowerCamelCase : int = get_dataloaders(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase : List[Any] = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase, return_dict=lowerCamelCase )
# Instantiate optimizer
lowerCamelCase : Tuple = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase : Optional[int] = optimizer_cls(params=model.parameters(), lr=lowerCamelCase )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCamelCase : Optional[int] = 1
lowerCamelCase : Optional[Any] = (len(lowerCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase : List[str] = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase, num_warmup_steps=0, num_training_steps=lowerCamelCase, )
else:
lowerCamelCase : List[Any] = DummyScheduler(lowerCamelCase, total_num_steps=lowerCamelCase, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = accelerator.prepare(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCamelCase : Any = 0
lowerCamelCase : Any = evaluate.load("""glue""", """mrpc""" )
lowerCamelCase : Union[str, Any] = num_epochs
if args.partial_train_epoch is not None:
lowerCamelCase : Optional[int] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
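# the epoch number is encoded in the checkpoint folder name (epoch_<n>); parse it so training resumes at the following epoch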
lowerCamelCase : Any = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCamelCase : List[str] = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCamelCase : List[Any] = int(lowerCamelCase ) + 1
lowerCamelCase : str = evaluation_loop(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
accelerator.print("""resumed checkpoint performance:""", lowerCamelCase )
accelerator.print("""resumed checkpoint's scheduler's lr:""", lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""", optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir, F'''state_{starting_epoch-1}.json''' ), """r""" ) as f:
lowerCamelCase : List[str] = json.load(lowerCamelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCamelCase : Union[str, Any] = {}
for epoch in range(lowerCamelCase, lowerCamelCase ):
model.train()
for step, batch in enumerate(lowerCamelCase ):
lowerCamelCase : List[str] = model(**lowerCamelCase )
lowerCamelCase : Tuple = outputs.loss
lowerCamelCase : Any = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
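# checkpoint the full training state at the end of every epoch so a later run can resume from epoch_<n>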
lowerCamelCase : str = F'''epoch_{epoch}'''
lowerCamelCase : Optional[int] = os.path.join(args.output_dir, lowerCamelCase )
accelerator.save_state(lowerCamelCase )
lowerCamelCase : int = evaluation_loop(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowerCamelCase : str = accuracy
lowerCamelCase : List[Any] = lr_scheduler.get_lr()[0]
lowerCamelCase : Any = optimizer.param_groups[0]["""lr"""]
lowerCamelCase : int = epoch
lowerCamelCase : Any = overall_step
accelerator.print(F'''epoch {epoch}:''', lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F'''state_{epoch}.json''' ), """w""" ) as f:
json.dump(lowerCamelCase, lowerCamelCase )
def _a ( ):
lowerCamelCase : str = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing and resume support.""" )
parser.add_argument(
"""--model_name_or_path""", type=lowerCamelCase, default="""bert-base-cased""", help="""Path to pretrained model or model identifier from huggingface.co/models.""", required=lowerCamelCase, )
parser.add_argument(
"""--output_dir""", type=lowerCamelCase, default=""".""", help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""", )
parser.add_argument(
"""--resume_from_checkpoint""", type=lowerCamelCase, default=lowerCamelCase, help="""If the training should continue from a checkpoint folder.""", )
parser.add_argument(
"""--partial_train_epoch""", type=lowerCamelCase, default=lowerCamelCase, help="""If passed, the training will stop after this number of epochs.""", )
parser.add_argument(
"""--num_epochs""", type=lowerCamelCase, default=2, help="""Number of train epochs.""", )
lowerCamelCase : Any = parser.parse_args()
lowerCamelCase : Optional[int] = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase, lowerCamelCase )
if __name__ == "__main__":
main()
| 681 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[Any] = F'''{sampling_rate}'''
lowerCamelCase : Optional[int] = """1"""
lowerCamelCase : Any = """f32le"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
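# the command above decodes whatever container/codec arrives on stdin into mono float32 PCM at the requested sampling rate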
try:
with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
lowerCamelCase : Union[str, Any] = output_stream[0]
lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ):
lowerCamelCase : Dict = F'''{sampling_rate}'''
lowerCamelCase : List[Any] = """1"""
if format_for_conversion == "s16le":
lowerCamelCase : Any = 2
elif format_for_conversion == "f32le":
lowerCamelCase : Dict = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase : Dict = platform.system()
if system == "Linux":
lowerCamelCase : Union[str, Any] = """alsa"""
lowerCamelCase : List[Any] = """default"""
elif system == "Darwin":
lowerCamelCase : List[Any] = """avfoundation"""
lowerCamelCase : List[Any] = """:0"""
elif system == "Windows":
lowerCamelCase : int = """dshow"""
lowerCamelCase : Any = """default"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
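# number of bytes in one chunk_length_s-second window of audio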
lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase )
for item in iterator:
yield item
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ):
if stream_chunk_s is not None:
lowerCamelCase : int = stream_chunk_s
else:
lowerCamelCase : Dict = chunk_length_s
lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase )
if format_for_conversion == "s16le":
lowerCamelCase : Optional[int] = np.intaa
lowerCamelCase : Optional[Any] = 2
elif format_for_conversion == "f32le":
lowerCamelCase : int = np.floataa
lowerCamelCase : Any = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowerCamelCase : Any = chunk_length_s / 6
lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCamelCase, (int, float) ):
lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s]
lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCamelCase : List[Any] = datetime.datetime.now()
lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase )
for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ):
# Put everything back in numpy scale
lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase )
lowerCamelCase : List[Any] = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
lowerCamelCase : Tuple = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ):
lowerCamelCase : Optional[int] = B""""""
lowerCamelCase , lowerCamelCase : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
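# the very first chunk has no left context, so its left stride starts at 0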
lowerCamelCase : str = 0
for raw in iterator:
acc += raw
if stream and len(lowerCamelCase ) < chunk_len:
lowerCamelCase : Optional[int] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
lowerCamelCase : str = (_stride_left, stride_right)
lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
lowerCamelCase : Optional[int] = False
yield item
lowerCamelCase : str = stride_left
lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCamelCase ) > stride_left:
lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
lowerCamelCase : List[Any] = False
yield item
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Optional[int] = 2**24 # 16 MB
try:
with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process:
while True:
lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 681 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
_lowerCamelCase =None
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
_lowerCamelCase ={
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
_lowerCamelCase ={
"""google/rembert""": 2_5_6,
}
_lowerCamelCase ="""▁"""
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
_UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Union[str, Any] = RemBertTokenizer
def __init__( self , __magic_name__=None , __magic_name__=None , __magic_name__=True , __magic_name__=True , __magic_name__=False , __magic_name__="[CLS]" , __magic_name__="[SEP]" , __magic_name__="<unk>" , __magic_name__="[SEP]" , __magic_name__="<pad>" , __magic_name__="[CLS]" , __magic_name__="[MASK]" , **__magic_name__ , ):
# Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase : Tuple = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , remove_space=__magic_name__ , keep_accents=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , **__magic_name__ , )
lowerCamelCase : List[str] = do_lower_case
lowerCamelCase : Dict = remove_space
lowerCamelCase : Optional[int] = keep_accents
lowerCamelCase : Union[str, Any] = vocab_file
lowerCamelCase : List[str] = False if not self.vocab_file else True
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : List[Any] = [self.sep_token_id]
lowerCamelCase : List[str] = [self.cls_token_id]
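# single sequence: [CLS] X [SEP] ; pair of sequences: [CLS] A [SEP] B [SEP]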
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : Tuple = [self.sep_token_id]
lowerCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
if not os.path.isdir(__magic_name__ ):
logger.error("""Vocabulary path ({}) should be a directory""".format(__magic_name__ ) )
return
lowerCamelCase : List[Any] = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ):
copyfile(self.vocab_file , __magic_name__ )
return (out_vocab_file,)
| 681 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
])
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , )
assert hasattr(self , """env""" )
def UpperCamelCase__ ( self , __magic_name__ ):
# configuration for running training on smdistributed Model Parallel
lowerCamelCase : Any = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCamelCase : Any = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
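# 4-way pipeline (model) parallelism with 4 microbatches per batch, launched through MPI with 8 processes per host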
lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 5_0_0,
} , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , )
def UpperCamelCase__ ( self , __magic_name__ ):
TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def UpperCamelCase__ ( self , __magic_name__ ):
# create estimator
lowerCamelCase : int = self.create_estimator(__magic_name__ )
# run training
estimator.fit()
# result dataframe
lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCamelCase : int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
| 681 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """perceiver"""
def __init__( self , __magic_name__=2_5_6 , __magic_name__=1_2_8_0 , __magic_name__=7_6_8 , __magic_name__=1 , __magic_name__=2_6 , __magic_name__=8 , __magic_name__=8 , __magic_name__=None , __magic_name__=None , __magic_name__="kv" , __magic_name__=1 , __magic_name__=1 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=True , __magic_name__=2_6_2 , __magic_name__=2_0_4_8 , __magic_name__=5_6 , __magic_name__=[3_6_8, 4_9_6] , __magic_name__=1_6 , __magic_name__=1_9_2_0 , __magic_name__=1_6 , __magic_name__=[1, 1_6, 2_2_4, 2_2_4] , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Optional[Any] = num_latents
lowerCamelCase : str = d_latents
lowerCamelCase : Optional[Any] = d_model
lowerCamelCase : Any = num_blocks
lowerCamelCase : Union[str, Any] = num_self_attends_per_block
lowerCamelCase : Optional[int] = num_self_attention_heads
lowerCamelCase : int = num_cross_attention_heads
lowerCamelCase : List[str] = qk_channels
lowerCamelCase : Any = v_channels
lowerCamelCase : List[Any] = cross_attention_shape_for_attention
lowerCamelCase : Union[str, Any] = self_attention_widening_factor
lowerCamelCase : int = cross_attention_widening_factor
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Optional[Any] = layer_norm_eps
lowerCamelCase : Union[str, Any] = use_query_residual
# masked language modeling attributes
lowerCamelCase : Dict = vocab_size
lowerCamelCase : Union[str, Any] = max_position_embeddings
# image classification attributes
lowerCamelCase : Optional[Any] = image_size
# flow attributes
lowerCamelCase : Tuple = train_size
# multimodal autoencoding attributes
lowerCamelCase : Tuple = num_frames
lowerCamelCase : Optional[int] = audio_samples_per_frame
lowerCamelCase : List[Any] = samples_per_patch
lowerCamelCase : str = output_shape
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-4
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , __magic_name__ = 3 , __magic_name__ = 4_0 , __magic_name__ = 4_0 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__magic_name__ , __magic_name__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase : Dict = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase : Any = preprocessor.num_special_tokens_to_add(__magic_name__ )
lowerCamelCase : Optional[Any] = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase : Optional[int] = [""" """.join(["""a"""] ) * seq_length] * batch_size
lowerCamelCase : Optional[int] = dict(preprocessor(__magic_name__ , return_tensors=__magic_name__ ) )
lowerCamelCase : List[str] = inputs.pop("""input_ids""" )
return inputs
elif isinstance(__magic_name__ , __magic_name__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase : List[str] = compute_effective_axis_dimension(__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowerCamelCase : Any = self._generate_dummy_images(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
lowerCamelCase : List[Any] = dict(preprocessor(images=__magic_name__ , return_tensors=__magic_name__ ) )
lowerCamelCase : Optional[int] = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 681 |
from __future__ import annotations
def _a ( lowerCamelCase ):
lowerCamelCase : Union[str, Any] = str(lowerCamelCase )
return n == n[::-1]
def _a ( lowerCamelCase = 100_0000 ):
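# Project Euler problem 36: sum every number below the limit that is palindromic in both base 10 and base 2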
lowerCamelCase : Any = 0
for i in range(1, lowerCamelCase ):
if is_palindrome(lowerCamelCase ) and is_palindrome(bin(lowerCamelCase ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 681 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """M-CLIP"""
def __init__( self , __magic_name__=1_0_2_4 , __magic_name__=7_6_8 , **__magic_name__ ):
lowerCamelCase : Optional[int] = transformerDimSize
lowerCamelCase : List[Any] = imageDimSize
super().__init__(**__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[Any] = MCLIPConfig
def __init__( self , __magic_name__ , *__magic_name__ , **__magic_name__ ):
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCamelCase : Dict = XLMRobertaModel(__magic_name__ )
lowerCamelCase : str = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Optional[Any] = self.transformer(input_ids=__magic_name__ , attention_mask=__magic_name__ )[0]
lowerCamelCase : int = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(__magic_name__ ), embs
| 681 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _a ( lowerCamelCase, lowerCamelCase=False ):
lowerCamelCase : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[Any] = """"""
else:
lowerCamelCase : Optional[int] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size]
lowerCamelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : Any = in_proj_bias[-config.hidden_size :]
def _a ( lowerCamelCase ):
lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase ):
# projection head is used in the self-supervised pre-training in MSN,
# for downstream tasks it's not needed.
lowerCamelCase : Any = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Dict = dct.pop(lowerCamelCase )
lowerCamelCase : str = val
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Any = ViTMSNConfig()
lowerCamelCase : Tuple = 1000
lowerCamelCase : List[Any] = """datasets/huggingface/label-files"""
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase ), """r""" ) )
lowerCamelCase : List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase : Optional[int] = idalabel
lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
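# the ViT variant and patch size are encoded in the checkpoint URL: s16/l16 are ViT-S/16 and ViT-L/16, b4 and l7 use patch sizes 4 and 7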
if "s16" in checkpoint_url:
lowerCamelCase : int = 384
lowerCamelCase : Optional[int] = 1536
lowerCamelCase : Tuple = 6
elif "l16" in checkpoint_url:
lowerCamelCase : Dict = 1024
lowerCamelCase : List[Any] = 4096
lowerCamelCase : Optional[int] = 24
lowerCamelCase : str = 16
lowerCamelCase : str = 0.1
elif "b4" in checkpoint_url:
lowerCamelCase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowerCamelCase : Tuple = 7
lowerCamelCase : Optional[int] = 1024
lowerCamelCase : List[Any] = 4096
lowerCamelCase : Tuple = 24
lowerCamelCase : Dict = 16
lowerCamelCase : str = 0.1
lowerCamelCase : List[Any] = ViTMSNModel(lowerCamelCase )
lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase, map_location="""cpu""" )["""target_encoder"""]
lowerCamelCase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowerCamelCase )
lowerCamelCase : Dict = create_rename_keys(lowerCamelCase, base_model=lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase )
read_in_q_k_v(lowerCamelCase, lowerCamelCase, base_model=lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
lowerCamelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
lowerCamelCase : Union[str, Any] = ViTImageProcessor(
size=config.image_size, image_mean=lowerCamelCase, image_std=lowerCamelCase )
lowerCamelCase : Tuple = image_processor(images=lowerCamelCase, return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowerCamelCase : int = model(**lowerCamelCase )
lowerCamelCase : Union[str, Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCamelCase : Union[str, Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCamelCase : Tuple = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCamelCase : List[str] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCamelCase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCamelCase : List[str] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], lowerCamelCase, atol=1e-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCamelCase =parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 681 | 1 |
import os
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[int] = len(grid[0] )
lowerCamelCase : List[str] = len(lowerCamelCase )
lowerCamelCase : Any = 0
lowerCamelCase : Dict = 0
lowerCamelCase : str = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(lowerCamelCase ):
for j in range(n_rows - 3 ):
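# products of the four vertically adjacent cells starting at (j, i) and the four horizontally adjacent cells starting at (i, j)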
lowerCamelCase : str = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
lowerCamelCase : Dict = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
lowerCamelCase : Optional[Any] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal (/) product
if i > 2:
lowerCamelCase : Tuple = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
lowerCamelCase : Optional[int] = max(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
if max_product > largest:
lowerCamelCase : Union[str, Any] = max_product
return largest
def _a ( ):
lowerCamelCase : List[str] = []
with open(os.path.dirname(lowerCamelCase ) + """/grid.txt""" ) as file:
for line in file:
grid.append(line.strip("""\n""" ).split(""" """ ) )
lowerCamelCase : Any = [[int(lowerCamelCase ) for i in grid[j]] for j in range(len(lowerCamelCase ) )]
return largest_product(lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 681 |
def _a ( lowerCamelCase ):
if num < 0:
return False
lowerCamelCase : int = num
lowerCamelCase : int = 0
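# peel off the last decimal digit each iteration and append it to the reversed number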
while num > 0:
lowerCamelCase : str = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
_lowerCamelCase ={
"""b0""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_2_4,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_4_0,
"""dropout_rate""": 0.2,
"""dw_padding""": [1_6],
},
"""b2""": {
"""hidden_dim""": 1_4_0_8,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_6_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 1_6],
},
"""b3""": {
"""hidden_dim""": 1_5_3_6,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_0_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 1_8],
},
"""b4""": {
"""hidden_dim""": 1_7_9_2,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_8_0,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_0_4_8,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_5_6,
"""dropout_rate""": 0.4,
"""dw_padding""": [1_3, 2_7],
},
"""b6""": {
"""hidden_dim""": 2_3_0_4,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_2_8,
"""dropout_rate""": 0.5,
"""dw_padding""": [3_1],
},
"""b7""": {
"""hidden_dim""": 2_5_6_0,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_0_0,
"""dropout_rate""": 0.5,
"""dw_padding""": [1_8],
},
}
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[int] = EfficientNetConfig()
lowerCamelCase : Optional[int] = CONFIG_MAP[model_name]["""hidden_dim"""]
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["""width_coef"""]
lowerCamelCase : List[Any] = CONFIG_MAP[model_name]["""depth_coef"""]
lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["""image_size"""]
lowerCamelCase : Any = CONFIG_MAP[model_name]["""dropout_rate"""]
lowerCamelCase : Any = CONFIG_MAP[model_name]["""dw_padding"""]
lowerCamelCase : Dict = """huggingface/label-files"""
lowerCamelCase : Dict = """imagenet-1k-id2label.json"""
lowerCamelCase : Tuple = 1000
lowerCamelCase : Tuple = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase, repo_type="""dataset""" ), """r""" ) )
lowerCamelCase : int = {int(lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase : List[Any] = idalabel
lowerCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def _a ( ):
lowerCamelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : int = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
return im
def _a ( lowerCamelCase ):
lowerCamelCase : int = CONFIG_MAP[model_name]["""image_size"""]
lowerCamelCase : Optional[Any] = EfficientNetImageProcessor(
size={"""height""": size, """width""": size}, image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3], do_center_crop=lowerCamelCase, )
return preprocessor
def _a ( lowerCamelCase ):
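# build the TF -> HF parameter-name mapping; original TF block ids (1a, 2b, ...) are renumbered to consecutive HF block indices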
lowerCamelCase : List[str] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
lowerCamelCase : int = sorted(set(lowerCamelCase ) )
lowerCamelCase : Any = len(lowerCamelCase )
lowerCamelCase : Tuple = {b: str(lowerCamelCase ) for b, i in zip(lowerCamelCase, range(lowerCamelCase ) )}
lowerCamelCase : Dict = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
lowerCamelCase : int = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
lowerCamelCase : List[str] = {}
for item in rename_keys:
if item[0] in original_param_names:
lowerCamelCase : Optional[Any] = """efficientnet.""" + item[1]
lowerCamelCase : Any = """classifier.weight"""
lowerCamelCase : Dict = """classifier.bias"""
return key_mapping
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
for key, value in tf_params.items():
if "normalization" in key:
continue
lowerCamelCase : Any = key_mapping[key]
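# TF stores kernels channels-last; permute convolution weights into PyTorch's channels-first layout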
if "_conv" in key and "kernel" in key:
lowerCamelCase : Dict = torch.from_numpy(lowerCamelCase ).permute(3, 2, 0, 1 )
elif "depthwise_kernel" in key:
lowerCamelCase : Dict = torch.from_numpy(lowerCamelCase ).permute(2, 3, 0, 1 )
elif "kernel" in key:
lowerCamelCase : str = torch.from_numpy(np.transpose(lowerCamelCase ) )
else:
lowerCamelCase : List[str] = torch.from_numpy(lowerCamelCase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowerCamelCase )
@torch.no_grad()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Tuple = model_classes[model_name](
include_top=lowerCamelCase, weights="""imagenet""", input_tensor=lowerCamelCase, input_shape=lowerCamelCase, pooling=lowerCamelCase, classes=1000, classifier_activation="""softmax""", )
lowerCamelCase : Optional[int] = original_model.trainable_variables
lowerCamelCase : str = original_model.non_trainable_variables
lowerCamelCase : str = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowerCamelCase : Optional[Any] = param.numpy()
lowerCamelCase : List[Any] = list(tf_params.keys() )
# Load HuggingFace model
lowerCamelCase : Union[str, Any] = get_efficientnet_config(lowerCamelCase )
lowerCamelCase : Optional[int] = EfficientNetForImageClassification(lowerCamelCase ).eval()
lowerCamelCase : List[Any] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
lowerCamelCase : str = rename_keys(lowerCamelCase )
replace_params(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Initialize preprocessor and preprocess input image
lowerCamelCase : List[str] = convert_image_processor(lowerCamelCase )
lowerCamelCase : List[Any] = preprocessor(images=prepare_img(), return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowerCamelCase : List[Any] = hf_model(**lowerCamelCase )
lowerCamelCase : List[Any] = outputs.logits.detach().numpy()
# Original model inference
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["""image_size"""]
lowerCamelCase : Any = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST )
lowerCamelCase : Tuple = image.img_to_array(lowerCamelCase )
lowerCamelCase : Optional[Any] = np.expand_dims(lowerCamelCase, axis=0 )
lowerCamelCase : List[str] = original_model.predict(lowerCamelCase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowerCamelCase, lowerCamelCase, atol=1e-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowerCamelCase ):
os.mkdir(lowerCamelCase )
# Save converted model and image processor
hf_model.save_pretrained(lowerCamelCase )
preprocessor.save_pretrained(lowerCamelCase )
if push_to_hub:
# Push model and image processor to hub
print(F'''Pushing converted {model_name} to the hub...''' )
lowerCamelCase : List[Any] = F'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowerCamelCase )
hf_model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
_lowerCamelCase =parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 681 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase ={
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
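# torch-dependent symbols are only registered when torch is available; everything resolves lazily through _LazyModule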
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 681 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
_lowerCamelCase ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
_lowerCamelCase ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def _a ( lowerCamelCase ):
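# rescale model outputs from [-1, 1] to [0, 1] before converting to PIL images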
lowerCamelCase : Optional[Any] = (images / 2 + 0.5).clamp(0, 1 )
lowerCamelCase : Optional[Any] = images.cpu().permute(0, 2, 3, 1 ).float().numpy()
lowerCamelCase : Any = numpy_to_pil(lowerCamelCase )
return images
def _a ( lowerCamelCase ):
if images.ndim == 3:
lowerCamelCase : Optional[Any] = images[None, ...]
lowerCamelCase : List[Any] = (images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCamelCase : Optional[int] = [Image.fromarray(image.squeeze(), mode="""L""" ) for image in images]
else:
lowerCamelCase : int = [Image.fromarray(lowerCamelCase ) for image in images]
return pil_images
| 681 |
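A self-contained sketch of the two helpers above, using a NumPy array instead of a torch tensor so it runs with just numpy and Pillow installed:

import numpy as np
from PIL import Image

def denormalize_to_pil(images):
    images = np.clip(images / 2 + 0.5, 0.0, 1.0)   # map [-1, 1] to [0, 1]
    images = np.transpose(images, (0, 2, 3, 1))    # NCHW -> NHWC for PIL
    images = (images * 255).round().astype("uint8")
    return [Image.fromarray(img) for img in images]

batch = np.random.uniform(-1, 1, size=(2, 3, 8, 8)).astype("float32")
pils = denormalize_to_pil(batch)
print(len(pils), pils[0].size)  # 2 (8, 8)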
import copy
import random
from transformers import CLIPTokenizer
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , *__magic_name__ , **__magic_name__ ):
super().__init__(*__magic_name__ , **__magic_name__ )
lowerCamelCase : Dict = {}
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ):
lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
""" `placeholder_token` that is not already in the tokenizer.""" )
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ):
lowerCamelCase : List[Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
output.append(__magic_name__ )
else:
lowerCamelCase : Dict = []
for i in range(__magic_name__ ):
lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}'''
self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
output.append(__magic_name__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
F''' {placeholder_token}. Keep placeholder tokens independent.''' )
lowerCamelCase : Any = output
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ):
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[str] = []
for i in range(len(__magic_name__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowerCamelCase : List[str] = self.token_map[placeholder_token]
lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ )
random.shuffle(__magic_name__ )
lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) )
return text
def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
__magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
__magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
| 681 | 1 |
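The core of the placeholder-replacement loop above can be sketched without a tokenizer at all; the token map and prompt here are made up for illustration:

import copy
import random

def expand_placeholders(text, token_map, vector_shuffle=False, prop_tokens_to_load=1.0):
    for placeholder, tokens in token_map.items():
        if placeholder in text:
            tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
            if vector_shuffle:
                tokens = copy.copy(tokens)
                random.shuffle(tokens)
            text = text.replace(placeholder, " ".join(tokens))
    return text

print(expand_placeholders("a photo of <cat>", {"<cat>": ["<cat>_0", "<cat>_1"]}))
# -> "a photo of <cat>_0 <cat>_1"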
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = """markuplm"""
def __init__( self , __magic_name__=3_0_5_2_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=0 , __magic_name__=0 , __magic_name__=2 , __magic_name__=2_5_6 , __magic_name__=1_0_2_4 , __magic_name__=2_1_6 , __magic_name__=1_0_0_1 , __magic_name__=3_2 , __magic_name__=5_0 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ , )
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Optional[int] = num_hidden_layers
lowerCamelCase : Any = num_attention_heads
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : Tuple = intermediate_size
lowerCamelCase : int = hidden_dropout_prob
lowerCamelCase : List[str] = attention_probs_dropout_prob
lowerCamelCase : Union[str, Any] = max_position_embeddings
lowerCamelCase : Optional[int] = type_vocab_size
lowerCamelCase : int = initializer_range
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Dict = position_embedding_type
lowerCamelCase : Dict = use_cache
lowerCamelCase : str = classifier_dropout
# additional properties
lowerCamelCase : Optional[int] = max_depth
lowerCamelCase : List[str] = max_xpath_tag_unit_embeddings
lowerCamelCase : Tuple = max_xpath_subs_unit_embeddings
lowerCamelCase : Union[str, Any] = tag_pad_id
lowerCamelCase : Tuple = subs_pad_id
lowerCamelCase : Any = xpath_unit_hidden_size
| 681 |
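Configuration classes like the one above are essentially attribute bags with serialization; a dataclass sketch of the pattern (field names and defaults loosely mirror the signature above, not an exact mapping):

from dataclasses import dataclass, asdict

@dataclass
class ToyConfig:
    vocab_size: int = 30522
    hidden_size: int = 768
    num_hidden_layers: int = 12
    layer_norm_eps: float = 1e-12

    def to_dict(self):
        return asdict(self)

cfg = ToyConfig(hidden_size=1024)
print(cfg.to_dict()["hidden_size"])  # 1024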
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ):
lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Union[str, Any] = batch_size
lowerCamelCase : str = num_channels
lowerCamelCase : Any = image_size
lowerCamelCase : Optional[int] = min_resolution
lowerCamelCase : Union[str, Any] = max_resolution
lowerCamelCase : Union[str, Any] = do_resize
lowerCamelCase : int = size
lowerCamelCase : int = do_center_crop
lowerCamelCase : Union[str, Any] = crop_size
lowerCamelCase : Union[str, Any] = do_normalize
lowerCamelCase : Dict = image_mean
lowerCamelCase : Optional[Any] = image_std
lowerCamelCase : Union[str, Any] = do_convert_rgb
def UpperCamelCase__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowerCamelCase : Tuple = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
lowerCamelCase : Dict = []
for i in range(self.batch_size ):
lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ )
lowerCamelCase : Any = 3
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 681 | 1 |
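The prepare_inputs helper above boils down to generating random images in three container types; a NumPy/Pillow sketch of the PIL branch (dimensions are arbitrary):

import numpy as np
from PIL import Image

def make_test_images(batch_size=4, channels=3, min_res=18, max_res=30, seed=0):
    rng = np.random.default_rng(seed)
    images = []
    for _ in range(batch_size):
        width, height = rng.integers(min_res, max_res, size=2)
        arr = rng.integers(0, 255, size=(channels, height, width), dtype=np.uint8)
        # PIL expects the channel dimension last, hence the moveaxis.
        images.append(Image.fromarray(np.moveaxis(arr, 0, -1)))
    return images

print([im.size for im in make_test_images()])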
import math
def res( x, y ):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base,
        # so comparing logarithms avoids computing the huge powers themselves.
        return y * math.log10(x )
    else:
        if x == 0: # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1 # any number raised to 0 is 1
    raise AssertionError("""This should never happen""" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
_lowerCamelCase ="""Enter the base and the power separated by a comma: """
_lowerCamelCase , _lowerCamelCase =map(int, input(prompt).split(""","""))
_lowerCamelCase , _lowerCamelCase =map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
_lowerCamelCase =res(xa, ya)
_lowerCamelCase =res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 681 |
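A compact sketch of the same idea as a single function, assuming positive bases so the logarithm is defined:

import math

def log_compare(xa, ya, xb, yb):
    # Compare xa**ya with xb**yb via ya*log10(xa), avoiding the huge powers.
    lhs, rhs = ya * math.log10(xa), yb * math.log10(xb)
    if lhs > rhs:
        return f"{xa}^{ya} is larger"
    if rhs > lhs:
        return f"{xb}^{yb} is larger"
    return "both are equal"

print(log_compare(2, 100, 10, 30))  # 2^100 ≈ 1.27e30 beats 10^30, so "2^100 is larger"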
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ):
lowerCamelCase : Tuple = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : List[Any] = image_size
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : Dict = embeddings_size
lowerCamelCase : Optional[int] = hidden_sizes
lowerCamelCase : Union[str, Any] = depths
lowerCamelCase : Optional[Any] = is_training
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Any = num_labels
lowerCamelCase : int = scope
lowerCamelCase : Optional[Any] = len(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ )
lowerCamelCase : Tuple = model(__magic_name__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = self.num_labels
lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ )
lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs
lowerCamelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase : List[str] = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Any = False
def UpperCamelCase__ ( self ):
lowerCamelCase : int = TFResNetModelTester(self )
lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def UpperCamelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__magic_name__ )
lowerCamelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Tuple = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = model_class(__magic_name__ )
lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Tuple = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase : Union[str, Any] = layer_type
lowerCamelCase : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : int = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ):
lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCamelCase : List[str] = self.default_image_processor
lowerCamelCase : str = prepare_img()
lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" )
# forward pass
lowerCamelCase : Tuple = model(**__magic_name__ )
# verify the logits
lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
| 681 | 1 |
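The argument-name check in the test above relies on inspect.signature preserving declaration order; a dependency-free sketch with a made-up model class:

import inspect

class ToyModel:
    def call(self, pixel_values, training=False):
        return pixel_values

arg_names = [*inspect.signature(ToyModel.call).parameters.keys()]
# Parameters come back in declaration order, so after `self` the first
# positional argument should be `pixel_values`.
assert arg_names[:2] == ["self", "pixel_values"]
print(arg_names)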
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"""tokenizer_file""": """tokenizer.json"""}
_lowerCamelCase ={
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
_UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Optional[int] = ["""input_ids""", """attention_mask"""]
_UpperCAmelCase : str = None
def __init__( self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__="<unk>" , __magic_name__="<s>" , __magic_name__="</s>" , __magic_name__="<pad>" , __magic_name__=False , __magic_name__=False , **__magic_name__ , ):
super().__init__(
__magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , unk_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , pad_token=__magic_name__ , add_prefix_space=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , **__magic_name__ , )
lowerCamelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space:
lowerCamelCase : Any = getattr(__magic_name__ , pre_tok_state.pop("""type""" ) )
lowerCamelCase : Union[str, Any] = add_prefix_space
lowerCamelCase : int = pre_tok_class(**__magic_name__ )
lowerCamelCase : Tuple = add_prefix_space
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
lowerCamelCase : str = kwargs.get("""is_split_into_words""" , __magic_name__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
lowerCamelCase : Tuple = kwargs.get("""is_split_into_words""" , __magic_name__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
""" pretokenized inputs.""" )
return super()._encode_plus(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : Optional[Any] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__magic_name__ , add_special_tokens=__magic_name__ ) + [self.eos_token_id] )
if len(__magic_name__ ) > self.model_max_length:
lowerCamelCase : str = input_ids[-self.model_max_length :]
return input_ids
| 681 |
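The conversation method at the end of the snippet above concatenates turns and keeps only the most recent tokens; a sketch with a hypothetical stand-in encoder:

def build_conversation_ids(turns, encode, eos_token_id, model_max_length=16):
    input_ids = []
    for text in turns:
        input_ids.extend(encode(text) + [eos_token_id])
    if len(input_ids) > model_max_length:
        # Truncate from the left so the newest context survives.
        input_ids = input_ids[-model_max_length:]
    return input_ids

toy_encode = lambda s: [ord(c) % 100 for c in s]  # hypothetical tokenizer stand-in
print(build_conversation_ids(["hello", "world!"], toy_encode, eos_token_id=0))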
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# Initialise PyTorch model
lowerCamelCase : str = MobileBertConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase : Tuple = MobileBertForPreTraining(lowerCamelCase )
# Load weights from tf checkpoint
lowerCamelCase : Tuple = load_tf_weights_in_mobilebert(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCamelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 681 | 1 |
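The conversion script above follows a config -> model -> load weights -> save pipeline; a dependency-free sketch of that flow with a hypothetical toy model standing in for MobileBertForPreTraining:

import json
import pickle

class TinyModel:
    """Hypothetical stand-in with a state_dict-like weights mapping."""
    def __init__(self, config):
        self.weights = {"embedding": [0.0] * config["hidden_size"]}
    def state_dict(self):
        return self.weights

def convert(config_json, dump_path):
    config = json.loads(config_json)      # build the model from a JSON config
    model = TinyModel(config)
    with open(dump_path, "wb") as f:      # persist only the weights, like torch.save
        pickle.dump(model.state_dict(), f)

convert('{"hidden_size": 4}', "tiny_model.bin")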
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""torch""", """scipy"""]
def __init__( self , *__magic_name__ , **__magic_name__ ):
requires_backends(self , ["""torch""", """scipy"""] )
@classmethod
def UpperCamelCase__ ( cls , *__magic_name__ , **__magic_name__ ):
requires_backends(cls , ["""torch""", """scipy"""] )
@classmethod
def UpperCamelCase__ ( cls , *__magic_name__ , **__magic_name__ ):
requires_backends(cls , ["""torch""", """scipy"""] )
| 681 |
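Dummy objects like the one above exist so that missing optional dependencies fail at use time with a clear message; a sketch of the guard (not the real requires_backends) using only the standard library:

import importlib.util

def toy_requires_backends(obj, backends):
    name = obj if isinstance(obj, str) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires backends that are not installed: {missing}")

class NeedsTorchAndScipy:
    def __init__(self):
        # Raises ImportError at construction time unless both are importable.
        toy_requires_backends(self, ["torch", "scipy"])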
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _a ( lowerCamelCase ):
# vision encoder
if "img_encoder.pos_embed" in name:
lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" )
if "img_encoder.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" )
if "ln_1" in name:
lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" )
if "ln_2" in name:
lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" )
if "c_fc" in name:
lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" )
if "c_proj" in name:
lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" )
if "text_encoder" in name:
lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" )
if "ln_final" in name:
lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" )
if "img_projector.linear_out." in name:
lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" )
if "text_projector.linear_out" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" )
return name
def _a ( lowerCamelCase, lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : Any = key.split(""".""" )
lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] )
lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase : int = val[:dim, :]
lowerCamelCase : List[str] = val[dim : dim * 2, :]
lowerCamelCase : Dict = val[-dim:, :]
else:
lowerCamelCase : List[Any] = val[:dim]
lowerCamelCase : List[Any] = val[dim : dim * 2]
lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : str = key.split(""".""" )
lowerCamelCase : Optional[int] = int(key_split[3] )
lowerCamelCase : List[str] = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase : Optional[int] = val[:dim, :]
lowerCamelCase : Any = val[
dim : dim * 2, :
]
lowerCamelCase : Optional[Any] = val[-dim:, :]
else:
lowerCamelCase : Union[str, Any] = val[:dim]
lowerCamelCase : Optional[int] = val[dim : dim * 2]
lowerCamelCase : Union[str, Any] = val[-dim:]
else:
lowerCamelCase : List[Any] = rename_key(lowerCamelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase : Any = val.squeeze_()
else:
lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def _a ( ):
lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ):
lowerCamelCase : int = GroupViTConfig()
lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval()
lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""]
lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase )
lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0)
# verify result
lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowerCamelCase : int = prepare_img()
lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" )
with torch.no_grad():
lowerCamelCase : int = model(**lowerCamelCase )
if model_name == "groupvit-gcc-yfcc":
lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 )
processor.save_pretrained(lowerCamelCase )
model.save_pretrained(lowerCamelCase )
print("""Successfully saved processor and model to""", lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCamelCase, organization="""nielsr""" )
model.push_to_hub(lowerCamelCase, organization="""nielsr""" )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_lowerCamelCase =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 681 | 1 |
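The fused-projection split in convert_state_dict above is easy to check in isolation; a NumPy sketch with a toy (3*dim, dim) matrix:

import numpy as np

def split_qkv(qkv_weight, dim):
    # Rows [0:dim) are the query, [dim:2*dim) the key, and the last dim the value.
    return qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]

fused = np.arange(12).reshape(6, 2)  # dim = 2 -> fused shape (3 * 2, 2)
q, k, v = split_qkv(fused, dim=2)
print(q.shape, k.shape, v.shape)  # (2, 2) (2, 2) (2, 2)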
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self , __magic_name__ , __magic_name__=2 , __magic_name__=True , __magic_name__=False , __magic_name__=1_0 , __magic_name__=3 , __magic_name__=3_2 * 8 , __magic_name__=3_2 * 8 , __magic_name__=4 , __magic_name__=6_4 , ):
lowerCamelCase : Optional[Any] = parent
lowerCamelCase : List[str] = batch_size
lowerCamelCase : Union[str, Any] = is_training
lowerCamelCase : str = use_auxiliary_loss
lowerCamelCase : Union[str, Any] = num_queries
lowerCamelCase : Tuple = num_channels
lowerCamelCase : List[str] = min_size
lowerCamelCase : Union[str, Any] = max_size
lowerCamelCase : str = num_labels
lowerCamelCase : List[str] = hidden_dim
lowerCamelCase : Union[str, Any] = hidden_dim
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__magic_name__ )
lowerCamelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__magic_name__ )
lowerCamelCase : int = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__magic_name__ ) > 0.5
).float()
lowerCamelCase : Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=__magic_name__ ) > 0.5).long()
lowerCamelCase : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCamelCase__ ( self ):
lowerCamelCase : int = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowerCamelCase : Tuple = self.num_queries
lowerCamelCase : Optional[Any] = self.num_labels
lowerCamelCase : Union[str, Any] = [1, 1, 1, 1]
lowerCamelCase : Any = self.num_channels
lowerCamelCase : Optional[Any] = 6_4
lowerCamelCase : Dict = 1_2_8
lowerCamelCase : Tuple = self.hidden_dim
lowerCamelCase : Dict = self.hidden_dim
lowerCamelCase : Dict = self.hidden_dim
return config
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = self.prepare_config_and_inputs()
lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = output.encoder_hidden_states
lowerCamelCase : str = output.pixel_decoder_hidden_states
lowerCamelCase : Union[str, Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , config.decoder_layers )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False ):
with torch.no_grad():
lowerCamelCase : Dict = MaskaFormerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : int = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
lowerCamelCase : Union[str, Any] = model(__magic_name__ , output_hidden_states=__magic_name__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : List[str] = MaskaFormerForUniversalSegmentation(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
def comm_check_on_output(__magic_name__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCamelCase : Union[str, Any] = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
lowerCamelCase : List[Any] = model(__magic_name__ )
comm_check_on_output(__magic_name__ )
lowerCamelCase : int = model(
pixel_values=__magic_name__ , pixel_mask=__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
comm_check_on_output(__magic_name__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : str = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_UpperCAmelCase : Tuple = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
_UpperCAmelCase : int = False
_UpperCAmelCase : Any = False
_UpperCAmelCase : int = False
_UpperCAmelCase : Dict = False
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = MaskaFormerModelTester(self )
lowerCamelCase : List[str] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__magic_name__ )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def UpperCamelCase__ ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Dict = model_class(__magic_name__ )
lowerCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Tuple = [*signature.parameters.keys()]
lowerCamelCase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowerCamelCase : int = MaskaFormerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = (self.model_tester.min_size,) * 2
lowerCamelCase : int = {
"""pixel_values""": torch.randn((2, 3, *size) , device=__magic_name__ ),
"""mask_labels""": torch.randn((2, 1_0, *size) , device=__magic_name__ ),
"""class_labels""": torch.zeros(2 , 1_0 , device=__magic_name__ ).long(),
}
lowerCamelCase : int = self.model_tester.get_config()
lowerCamelCase : Optional[int] = MaskaFormerForUniversalSegmentation(__magic_name__ ).to(__magic_name__ )
lowerCamelCase : Union[str, Any] = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : str = model_class(__magic_name__ ).to(__magic_name__ )
lowerCamelCase : Any = model(**__magic_name__ , output_attentions=__magic_name__ )
self.assertTrue(outputs.attentions is not None )
def UpperCamelCase__ ( self ):
if not self.model_tester.is_training:
return
lowerCamelCase : List[Any] = self.all_model_classes[1]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
lowerCamelCase : Optional[int] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
lowerCamelCase : Dict = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ ).loss
loss.backward()
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.all_model_classes[1]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
lowerCamelCase : Optional[int] = True
lowerCamelCase : List[str] = True
lowerCamelCase : int = model_class(__magic_name__ ).to(__magic_name__ )
model.train()
lowerCamelCase : str = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
lowerCamelCase : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCamelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCamelCase : Any = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__magic_name__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowerCamelCase =1E-4
def _a ( ):
lowerCamelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class A__ ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def UpperCamelCase__ ( self ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__magic_name__ )
lowerCamelCase : Optional[int] = self.default_image_processor
lowerCamelCase : Tuple = prepare_img()
lowerCamelCase : int = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
lowerCamelCase : str = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
lowerCamelCase : List[Any] = model(**__magic_name__ )
lowerCamelCase : Tuple = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
lowerCamelCase : Any = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
lowerCamelCase : int = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__magic_name__ ).eval()
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : str = prepare_img()
lowerCamelCase : Any = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
lowerCamelCase : Optional[int] = model(**__magic_name__ )
# masks_queries_logits
lowerCamelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowerCamelCase : Union[str, Any] = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
lowerCamelCase : int = torch.tensor(__magic_name__ ).to(__magic_name__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
# class_queries_logits
lowerCamelCase : int = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowerCamelCase : Optional[int] = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__magic_name__ ).eval()
lowerCamelCase : int = self.default_image_processor
lowerCamelCase : int = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="""pt""" , )
lowerCamelCase : Tuple = inputs["""pixel_values"""].to(__magic_name__ )
lowerCamelCase : str = [el.to(__magic_name__ ) for el in inputs["""mask_labels"""]]
lowerCamelCase : List[Any] = [el.to(__magic_name__ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
lowerCamelCase : List[Any] = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
| 681 |
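The gradient-retention test near the end of the snippet above hinges on one PyTorch detail: non-leaf tensors drop their .grad unless retain_grad() is called before backward(). A minimal sketch, assuming torch is installed:

import torch

x = torch.randn(2, 3, requires_grad=True)
hidden = x * 2              # a non-leaf, intermediate tensor
hidden.retain_grad()        # without this, hidden.grad would stay None
hidden.sum().backward()
print(hidden.grad is not None)  # True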
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class A__ :
# setable values
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def UpperCamelCase__ ( cls ):
return cls()
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : KarrasVeSchedulerState
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
return True
@register_to_config
def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ):
pass
def UpperCamelCase__ ( self ):
return KarrasVeSchedulerState.create()
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ):
lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy()
lowerCamelCase : int = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase : Dict = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 )
lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape )
lowerCamelCase : List[Any] = sigma + gamma * sigma
lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output
lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : str = sample_prev + sigma_prev * model_output
lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
raise NotImplementedError()
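# --- Editor's illustrative aside (not part of the original file) ---
# The schedule built in the set_timesteps method above interpolates geometrically
# between sigma_max**2 and sigma_min**2 across the reversed timesteps. A minimal
# standalone sketch of the same expression in plain NumPy, with descriptive names
# assumed for readability:
import numpy as np

def karras_schedule_sketch(sigma_min: float, sigma_max: float, num_inference_steps: int) -> np.ndarray:
    # reversed integer timesteps, exactly as in the method above
    timesteps = np.arange(0, num_inference_steps)[::-1]
    return np.array(
        [
            sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
            for i in timesteps
        ],
        dtype=np.float32,
    )

# With the defaults visible in __init__ (sigma_min=0.02, sigma_max=100):
# karras_schedule_sketch(0.02, 100.0, 50) reproduces the values stored in the state.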
| 681 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_lowerCamelCase =logging.get_logger(__name__)
def _a ( lowerCamelCase, lowerCamelCase ):
try:
with open(lowerCamelCase, """rb""" ) as flax_state_f:
lowerCamelCase : Optional[Any] = from_bytes(lowerCamelCase, flax_state_f.read() )
except UnpicklingError as e:
try:
with open(lowerCamelCase ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'''Unable to convert {model_file} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
lowerCamelCase : Tuple = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16, lowerCamelCase ) ).values()
if any(lowerCamelCase ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
lowerCamelCase : Union[str, Any] = jax.tree_util.tree_map(
lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params, lowerCamelCase )
lowerCamelCase : Any = """"""
lowerCamelCase : Tuple = flatten_dict(lowerCamelCase, sep=""".""" )
lowerCamelCase : Optional[Any] = pt_model.state_dict()
# keep track of unexpected & missing keys
lowerCamelCase : List[Any] = []
lowerCamelCase : Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase : List[str] = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
lowerCamelCase : int = flax_key_tuple_array[:-1] + ["""weight"""]
lowerCamelCase : Union[str, Any] = jnp.transpose(lowerCamelCase, (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
lowerCamelCase : Tuple = flax_key_tuple_array[:-1] + ["""weight"""]
lowerCamelCase : str = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
lowerCamelCase : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(lowerCamelCase ):
lowerCamelCase : Union[str, Any] = (
flax_key_tuple_string.replace("""_0""", """.0""" )
.replace("""_1""", """.1""" )
.replace("""_2""", """.2""" )
.replace("""_3""", """.3""" )
.replace("""_4""", """.4""" )
.replace("""_5""", """.5""" )
.replace("""_6""", """.6""" )
.replace("""_7""", """.7""" )
.replace("""_8""", """.8""" )
.replace("""_9""", """.9""" )
)
lowerCamelCase : int = """.""".join(lowerCamelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowerCamelCase : int = np.asarray(lowerCamelCase ) if not isinstance(lowerCamelCase, np.ndarray ) else flax_tensor
lowerCamelCase : Tuple = torch.from_numpy(lowerCamelCase )
# remove from missing keys
missing_keys.remove(lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowerCamelCase )
pt_model.load_state_dict(lowerCamelCase )
# re-transform missing_keys to list
lowerCamelCase : Any = list(lowerCamelCase )
if len(lowerCamelCase ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(lowerCamelCase ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
""" use it for predictions and inference.""" )
return pt_model
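# --- Editor's illustrative aside (not part of the original file) ---
# The 4-D "kernel" branch above relies on the weight-layout difference between the
# two frameworks: Flax stores convolution kernels as (H, W, in_channels,
# out_channels) while PyTorch expects (out_channels, in_channels, H, W), hence the
# (3, 2, 0, 1) transpose. A quick self-contained check in plain NumPy:
import numpy as np

flax_kernel = np.zeros((3, 3, 16, 32))      # (H, W, in, out), as in a Flax Conv
pt_weight = np.transpose(flax_kernel, (3, 2, 0, 1))
assert pt_weight.shape == (32, 16, 3, 3)    # (out, in, H, W), as in torch.nn.Conv2d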
| 681 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[str] = k_size // 2
lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCamelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) )
return g
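# --- Editor's illustrative aside (not part of the original file) ---
# The function above evaluates a 2-D Gaussian on a k_size x k_size grid centred at
# zero, but its discrete sum is not exactly 1 (and the 1/(2*pi*sigma) prefactor
# uses sigma where the textbook density uses sigma**2). A hedged variant that
# renormalises so filtering preserves overall brightness; the prefactor cancels:
from numpy import exp, mgrid, pi, square

def gen_gaussian_kernel_normalized(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g / g.sum()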
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1]
# dst image height and width
lowerCamelCase : Dict = height - k_size + 1
lowerCamelCase : str = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
lowerCamelCase : List[Any] = 0
for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ):
lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] )
lowerCamelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase )
lowerCamelCase : str = ravel(lowerCamelCase )
# reshape and get the dst image
lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(uint8 )
return dst
if __name__ == "__main__":
# read original image
_lowerCamelCase =imread(R"""../image_data/lena.jpg""")
# turn image in gray scale value
_lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_lowerCamelCase =gaussian_filter(gray, 3, sigma=1)
_lowerCamelCase =gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("""gaussian filter with 3x3 mask""", gaussianaxa)
imshow("""gaussian filter with 5x5 mask""", gaussianaxa)
waitKey()
| 681 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
lowerCamelCase : List[Any] = BlipImageProcessor()
lowerCamelCase : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
lowerCamelCase : List[Any] = BlipProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self , **__magic_name__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def UpperCamelCase__ ( self , **__magic_name__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def UpperCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
lowerCamelCase : Union[str, Any] = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase : Tuple = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
lowerCamelCase : Dict = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = self.get_image_processor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : Dict = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCamelCase : Optional[int] = self.prepare_image_inputs()
lowerCamelCase : Union[str, Any] = image_processor(__magic_name__ , return_tensors="""np""" )
lowerCamelCase : Optional[Any] = processor(images=__magic_name__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = self.get_image_processor()
lowerCamelCase : Optional[Any] = self.get_tokenizer()
lowerCamelCase : Tuple = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCamelCase : Any = """lower newer"""
lowerCamelCase : int = processor(text=__magic_name__ )
lowerCamelCase : List[str] = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = self.get_image_processor()
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Union[str, Any] = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCamelCase : Union[str, Any] = """lower newer"""
lowerCamelCase : str = self.prepare_image_inputs()
lowerCamelCase : List[Any] = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Optional[int] = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCamelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : Optional[Any] = processor.batch_decode(__magic_name__ )
lowerCamelCase : int = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = self.get_image_processor()
lowerCamelCase : int = self.get_tokenizer()
lowerCamelCase : Tuple = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCamelCase : List[str] = """lower newer"""
lowerCamelCase : Optional[Any] = self.prepare_image_inputs()
lowerCamelCase : int = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
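# --- Editor's illustrative aside (not part of the original file) ---
# The round trip the tests above exercise, condensed into one runnable snippet;
# the checkpoint and inputs mirror the fixtures defined in this file:
import numpy as np
from PIL import Image
from transformers import BertTokenizer, BlipImageProcessor, BlipProcessor

processor = BlipProcessor(
    tokenizer=BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel"),
    image_processor=BlipImageProcessor(),
)
image = Image.fromarray(np.zeros((3_0, 4_0_0, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image)
assert sorted(inputs.keys()) == ["attention_mask", "input_ids", "pixel_values"]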
| 681 |
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = dataset_loading_script_name
lowerCamelCase : Dict = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase )
lowerCamelCase : str = script_dir / F'''{script_name}.py'''
with open(lowerCamelCase, """w""" ) as f:
f.write(lowerCamelCase )
return str(lowerCamelCase )
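# --- Editor's illustrative aside (not part of the original file) ---
# A hedged sketch of how a test could consume the path returned by the last fixture
# above. "dataset_script_path" is a stand-in for the (name-obfuscated) fixture name,
# and it assumes a `datasets` version that still supports loading script files:
import datasets

def test_dummy_dataset_builds(dataset_script_path):
    builder = datasets.load_dataset_builder(dataset_script_path)
    assert "tokens" in builder.info.features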
| 681 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_t5 import T5Tokenizer
else:
_lowerCamelCase =None
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_lowerCamelCase ={
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
_lowerCamelCase ={
"""t5-small""": 5_1_2,
"""t5-base""": 5_1_2,
"""t5-large""": 5_1_2,
"""t5-3b""": 5_1_2,
"""t5-11b""": 5_1_2,
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
_UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Optional[Any] = ["""input_ids""", """attention_mask"""]
_UpperCAmelCase : Optional[Any] = T5Tokenizer
_UpperCAmelCase : List[int] = []
def __init__( self , __magic_name__=None , __magic_name__=None , __magic_name__="</s>" , __magic_name__="<unk>" , __magic_name__="<pad>" , __magic_name__=1_0_0 , __magic_name__=None , **__magic_name__ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
lowerCamelCase : str = [F'''<extra_id_{i}>''' for i in range(__magic_name__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowerCamelCase : Any = len(set(filter(lambda __magic_name__ : bool("""extra_id_""" in str(__magic_name__ ) ) , __magic_name__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , )
lowerCamelCase : str = vocab_file
lowerCamelCase : Any = False if not self.vocab_file else True
lowerCamelCase : List[str] = extra_ids
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ):
if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
lowerCamelCase : int = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __magic_name__ , )
return max_model_length
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__magic_name__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : Optional[Any] = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ):
copyfile(self.vocab_file , __magic_name__ )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : Optional[int] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowerCamelCase : Any = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : Tuple = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase__ ( self ):
return list(
set(filter(lambda __magic_name__ : bool(re.search(r"""<extra_id_\d+>""" , __magic_name__ ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase__ ( self ):
return [self.convert_tokens_to_ids(__magic_name__ ) for token in self.get_sentinel_tokens()]
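# --- Editor's illustrative aside (not part of the original file) ---
# get_sentinel_tokens above keeps only additional special tokens matching the
# <extra_id_N> pattern. The same filter, demonstrated standalone:
import re

tokens = ["<extra_id_0>", "<extra_id_99>", "</s>", "<pad>"]
sentinels = [t for t in tokens if re.search(r"<extra_id_\d+>", t)]
assert sentinels == ["<extra_id_0>", "<extra_id_99>"]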
| 681 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
_lowerCamelCase ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
_lowerCamelCase ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[Any] = (images / 2 + 0.5).clamp(0, 1 )
lowerCamelCase : Optional[Any] = images.cpu().permute(0, 2, 3, 1 ).float().numpy()
lowerCamelCase : Any = numpy_to_pil(lowerCamelCase )
return images
def _a ( lowerCamelCase ):
if images.ndim == 3:
lowerCamelCase : Optional[Any] = images[None, ...]
lowerCamelCase : List[Any] = (images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCamelCase : Optional[int] = [Image.fromarray(image.squeeze(), mode="""L""" ) for image in images]
else:
lowerCamelCase : int = [Image.fromarray(image ) for image in images]
return pil_images
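# --- Editor's illustrative aside (not part of the original file) ---
# The numpy-to-PIL helper above expects HWC arrays in [0, 1]. A quick
# self-contained usage sketch that spells out the same conversion steps:
import numpy as np
from PIL import Image

batch = np.random.rand(2, 64, 64, 3)              # two RGB images in [0, 1]
as_uint8 = (batch * 255).round().astype("uint8")
pil_images = [Image.fromarray(img) for img in as_uint8]
assert pil_images[0].size == (64, 64)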
| 681 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def _a ( lowerCamelCase="" ):
lowerCamelCase : int = tempfile.mkdtemp()
return os.path.join(lowerCamelCase, str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = torch.rand(1_2 , dtype=torch.float32 ) - 0.5
lowerCamelCase : Union[str, Any] = AgentAudio(__magic_name__ )
lowerCamelCase : Any = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__magic_name__ , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(__magic_name__ ) )
# Ensure that the file contains the same value as the original tensor
lowerCamelCase , lowerCamelCase : List[str] = sf.read(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , torch.tensor(__magic_name__ ) , atol=1e-4 ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = torch.rand(1_2 , dtype=torch.float32 ) - 0.5
lowerCamelCase : Optional[int] = get_new_path(suffix=""".wav""" )
sf.write(__magic_name__ , __magic_name__ , 1_6_0_0_0 )
lowerCamelCase : Dict = AgentAudio(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , __magic_name__ )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) )
lowerCamelCase : Dict = AgentImage(__magic_name__ )
lowerCamelCase : List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__magic_name__ , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
lowerCamelCase : Optional[Any] = Image.open(__magic_name__ )
lowerCamelCase : List[str] = AgentImage(__magic_name__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
lowerCamelCase : Tuple = Image.open(__magic_name__ )
lowerCamelCase : List[Any] = AgentImage(__magic_name__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__magic_name__ ) )
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """Hey!"""
lowerCamelCase : str = AgentText(__magic_name__ )
self.assertEqual(__magic_name__ , agent_type.to_string() )
self.assertEqual(__magic_name__ , agent_type.to_raw() )
self.assertEqual(__magic_name__ , __magic_name__ )
| 681 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class A__ ( nn.Module):
def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ):
super().__init__()
lowerCamelCase : Any = nn.ModuleList(
[
Transformer2DModel(
num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCamelCase : Any = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCamelCase : List[Any] = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCamelCase : Optional[int] = [1, 0]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ):
lowerCamelCase : List[Any] = hidden_states
lowerCamelCase : Dict = []
lowerCamelCase : List[Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i]
lowerCamelCase : List[Any] = self.transformers[transformer_index](
__magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCamelCase : Dict = output_states + input_states
if not return_dict:
return (output_states,)
return Transformer2DModelOutput(sample=__magic_name__ )
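# --- Editor's illustrative aside (not part of the original file) ---
# The forward pass above slices the concatenated conditions by condition_lengths
# and blends the two branch outputs with mix_ratio. The slicing and blending alone,
# demonstrated in plain torch with stand-in tensors:
import torch

condition_lengths = [7_7, 2_5_7]
encoder_hidden_states = torch.randn(1, sum(condition_lengths), 8)
start, chunks = 0, []
for length in condition_lengths:
    chunks.append(encoder_hidden_states[:, start : start + length])
    start += length
assert [c.shape[1] for c in chunks] == condition_lengths

mix_ratio = 0.5
out_a, out_b = torch.randn(2, 4), torch.randn(2, 4)  # stand-ins for branch outputs
blended = out_a * mix_ratio + out_b * (1 - mix_ratio)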
| 681 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = """van"""
def __init__( self , __magic_name__=2_2_4 , __magic_name__=3 , __magic_name__=[7, 3, 3, 3] , __magic_name__=[4, 2, 2, 2] , __magic_name__=[6_4, 1_2_8, 3_2_0, 5_1_2] , __magic_name__=[3, 3, 1_2, 3] , __magic_name__=[8, 8, 4, 4] , __magic_name__="gelu" , __magic_name__=0.02 , __magic_name__=1e-6 , __magic_name__=1e-2 , __magic_name__=0.0 , __magic_name__=0.0 , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Tuple = image_size
lowerCamelCase : Union[str, Any] = num_channels
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : str = hidden_sizes
lowerCamelCase : Any = depths
lowerCamelCase : int = mlp_ratios
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : Dict = initializer_range
lowerCamelCase : List[Any] = layer_norm_eps
lowerCamelCase : Union[str, Any] = layer_scale_init_value
lowerCamelCase : int = drop_path_rate
lowerCamelCase : List[Any] = dropout_rate
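# --- Editor's illustrative aside (not part of the original file) ---
# Assuming the base class resolves to PretrainedConfig, as the import above
# suggests, the config can be instantiated with keyword overrides and round-tripped
# through to_dict():
config = A__(image_size=3_8_4, hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2])
assert config.to_dict()["image_size"] == 3_8_4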
| 681 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase ="""▁"""
_lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : str = BertGenerationTokenizer
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : List[Any] = True
def UpperCamelCase__ ( self ):
super().setUp()
lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """<s>"""
lowerCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(__magic_name__ ) , 1_0_0_2 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCamelCase__ ( self ):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """Hello World!"""
lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : str = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCamelCase : str = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@require_torch
@slow
def UpperCamelCase__ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowerCamelCase : Dict = """ """.join(__magic_name__ )
lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : Tuple = BertGenerationConfig()
lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__magic_name__ )
model(**__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
# fmt: off
lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
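# --- Editor's illustrative aside (not part of the original file) ---
# The "▁" marker asserted throughout these tests is SentencePiece's word-boundary
# symbol; detokenisation is just join-and-replace:
pieces = ["▁This", "▁is", "▁a", "▁t", "est"]
text = "".join(pieces).replace("▁", " ").strip()
assert text == "This is a test"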
| 681 | 1 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_4bit_bnb_available,
is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowerCamelCase =logging.getLogger(__name__)
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, ):
lowerCamelCase : Dict = bnb_quantization_config.load_in_8bit
lowerCamelCase : Optional[Any] = bnb_quantization_config.load_in_4bit
if load_in_8bit and not is_8bit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_4bit and not is_4bit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
lowerCamelCase : Optional[Any] = []
# custom device map
if isinstance(lowerCamelCase, lowerCamelCase ) and len(device_map.keys() ) > 1:
lowerCamelCase : int = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCamelCase : List[Any] = get_keys_to_not_convert(lowerCamelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_4bit:
bnb_quantization_config.skip_modules.extend(lowerCamelCase )
lowerCamelCase : List[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fp32_modules is None:
lowerCamelCase : Union[str, Any] = []
lowerCamelCase : Dict = bnb_quantization_config.keep_in_fp32_modules
modules_to_not_convert.extend(lowerCamelCase )
# compatibility with peft
lowerCamelCase : Dict = load_in_8bit
lowerCamelCase : Tuple = load_in_4bit
lowerCamelCase : Any = get_parameter_device(lowerCamelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
lowerCamelCase : Any = replace_with_bnb_layers(lowerCamelCase, lowerCamelCase, modules_to_not_convert=lowerCamelCase )
# convert param to the right dtype
lowerCamelCase : List[str] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
param.to(torch.float32 )
if param.dtype != torch.float32:
lowerCamelCase : List[Any] = name.replace(""".weight""", """""" ).replace(""".bias""", """""" )
lowerCamelCase : Optional[Any] = getattr(lowerCamelCase, lowerCamelCase, lowerCamelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowerCamelCase ):
param.to(lowerCamelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
lowerCamelCase : Union[str, Any] = replace_with_bnb_layers(
lowerCamelCase, lowerCamelCase, modules_to_not_convert=lowerCamelCase )
lowerCamelCase : Union[str, Any] = get_quantized_model_device_map(
lowerCamelCase, lowerCamelCase, lowerCamelCase, max_memory=lowerCamelCase, no_split_module_classes=lowerCamelCase, )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCamelCase : Any = True
lowerCamelCase : str = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
lowerCamelCase, lowerCamelCase, lowerCamelCase, dtype=bnb_quantization_config.torch_dtype, offload_folder=lowerCamelCase, offload_state_dict=lowerCamelCase, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload, )
return dispatch_model(lowerCamelCase, device_map=lowerCamelCase, offload_dir=lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None ):
if device_map is None:
if torch.cuda.is_available():
lowerCamelCase : Dict = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(lowerCamelCase, lowerCamelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
lowerCamelCase : Optional[int] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.float32
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
} )
lowerCamelCase : Tuple = {}
lowerCamelCase : Union[str, Any] = special_dtypes
lowerCamelCase : int = no_split_module_classes
lowerCamelCase : str = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCamelCase : List[Any] = get_balanced_memory(
lowerCamelCase, low_zero=(device_map == """balanced_low_0"""), max_memory=lowerCamelCase, **lowerCamelCase, )
lowerCamelCase : Dict = max_memory
lowerCamelCase : Dict = infer_auto_device_map(lowerCamelCase, **lowerCamelCase )
if isinstance(lowerCamelCase, lowerCamelCase ):
# check if don't have any quantized module on the cpu
lowerCamelCase : Any = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCamelCase : List[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_4bit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None ):
if modules_to_not_convert is None:
lowerCamelCase : int = []
lowerCamelCase , lowerCamelCase : Union[str, Any] = _replace_with_bnb_layers(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, ):
lowerCamelCase : Optional[Any] = False
for name, module in model.named_children():
if current_key_name is None:
lowerCamelCase : Optional[Any] = []
current_key_name.append(lowerCamelCase )
if isinstance(lowerCamelCase, nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCamelCase : Any = """.""".join(lowerCamelCase )
lowerCamelCase : Optional[int] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCamelCase : Optional[int] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_8bit:
lowerCamelCase : List[Any] = bnb.nn.Linear8bitLt(
module.in_features, module.out_features, module.bias is not None, has_fp16_weights=lowerCamelCase, threshold=bnb_quantization_config.llm_int8_threshold, )
elif bnb_quantization_config.load_in_4bit:
lowerCamelCase : Union[str, Any] = bnb.nn.Linear4bit(
module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
lowerCamelCase : Any = module.weight.data
if module.bias is not None:
lowerCamelCase : int = module.bias.data
bnb_module.requires_grad_(lowerCamelCase )
setattr(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowerCamelCase : Any = True
if len(list(module.children() ) ) > 0:
lowerCamelCase , lowerCamelCase : str = _replace_with_bnb_layers(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowerCamelCase : int = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _a ( lowerCamelCase ):
# Create a copy of the model
with init_empty_weights():
lowerCamelCase : Optional[int] = deepcopy(lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCamelCase : Optional[Any] = find_tied_parameters(lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Tuple = sum(list(tied_params.values() ), [] ) + list(tied_params.keys() )
else:
lowerCamelCase : List[str] = sum(lowerCamelCase, [] )
lowerCamelCase : List[Any] = len(lowerCamelCase ) > 0
# Check if it is a base model
lowerCamelCase : str = False
if hasattr(lowerCamelCase, """base_model_prefix""" ):
lowerCamelCase : List[str] = not hasattr(lowerCamelCase, model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCamelCase : Optional[Any] = list(model.named_children() )
lowerCamelCase : Union[str, Any] = [list_modules[-1][0]]
# add last module together with tied weights
lowerCamelCase : List[str] = set(lowerCamelCase ) - set(lowerCamelCase )
lowerCamelCase : Optional[int] = list(set(lowerCamelCase ) ) + list(lowerCamelCase )
# remove ".weight" from the keys
lowerCamelCase : Optional[Any] = [""".weight""", """.bias"""]
lowerCamelCase : str = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCamelCase : Dict = name.replace(lowerCamelCase, """""" )
filtered_module_names.append(lowerCamelCase )
return filtered_module_names
def _a ( lowerCamelCase ):
for m in model.modules():
if isinstance(lowerCamelCase, bnb.nn.Linear4bit ):
return True
return False
def _a ( lowerCamelCase ):
return next(parameter.parameters() ).device
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(lowerCamelCase, lowerCamelCase, 0, dtype=lowerCamelCase, value=lowerCamelCase )
lowerCamelCase : Union[str, Any] = param_name
lowerCamelCase : Dict = model
if "." in tensor_name:
lowerCamelCase : Union[str, Any] = tensor_name.split(""".""" )
for split in splits[:-1]:
lowerCamelCase : Dict = getattr(lowerCamelCase, lowerCamelCase )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
lowerCamelCase : Optional[Any] = new_module
lowerCamelCase : List[str] = splits[-1]
# offload weights
lowerCamelCase : Optional[Any] = False
offload_weight(module._parameters[tensor_name], lowerCamelCase, lowerCamelCase, index=lowerCamelCase )
if hasattr(module._parameters[tensor_name], """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB, param_name.replace("""weight""", """SCB""" ), lowerCamelCase, index=lowerCamelCase, )
else:
offload_weight(lowerCamelCase, lowerCamelCase, lowerCamelCase, index=lowerCamelCase )
offload_weight(lowerCamelCase, param_name.replace("""weight""", """SCB""" ), lowerCamelCase, index=lowerCamelCase )
set_module_tensor_to_device(lowerCamelCase, lowerCamelCase, """meta""", dtype=lowerCamelCase, value=torch.empty(*param.size() ) )
| 681 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
_lowerCamelCase =HfArgumentParser(InitializationArguments)
_lowerCamelCase =parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
_lowerCamelCase =AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_lowerCamelCase ={
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
_lowerCamelCase =AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
_lowerCamelCase =AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
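# --- Editor's illustrative aside (not part of the original file) ---
# HfArgumentParser maps dataclass fields to CLI flags. A hedged sketch of how
# InitializationArguments *could* be declared (the real fields live in
# arguments.py, which is not shown here — names and defaults are assumptions):
from dataclasses import dataclass, field

@dataclass
class InitializationArgumentsSketch:
    tokenizer_name: str = field(default="codeparrot/codeparrot", metadata={"help": "Tokenizer checkpoint."})
    config_name: str = field(default="gpt2-large", metadata={"help": "Base config to start from."})
    model_name: str = field(default="codeparrot/codeparrot", metadata={"help": "Where to save the initialized model."})
    push_to_hub: bool = field(default=False, metadata={"help": "Push the saved model to the Hub."})

# Usage: HfArgumentParser(InitializationArgumentsSketch).parse_args_into_dataclasses()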
| 681 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _a ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""", """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""", """https://huggingface.co""", timeout=1.0 )
@pytest.mark.integration
def _a ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""", """https://huggingface.co""" )
def _a ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" )
| 681 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self , __magic_name__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """sshleifer/tiny-gpt2"""
lowerCamelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = """sgugger/tiny-distilbert-classification"""
lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , )
lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """sshleifer/tiny-gpt2"""
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """sshleifer/tiny-gpt2"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] )
lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , )
lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ )
benchmark.run()
self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(__magic_name__ ):
self.assertTrue(hasattr(__magic_name__ , """sequential""" ) )
self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) )
self.assertTrue(hasattr(__magic_name__ , """current""" ) )
self.assertTrue(hasattr(__magic_name__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
| 681 | 1 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , *__magic_name__ , **__magic_name__ ):
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 681 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
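# Simple helper exposed to the interpreter tests below as the `add_two` tool.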
def _a ( lowerCamelCase ):
return x + 2
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """x = 3"""
lowerCamelCase : Tuple = {}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
lowerCamelCase : Optional[int] = """x = y"""
lowerCamelCase : Tuple = {"""y""": 5}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """y = add_two(x)"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """x = 3"""
lowerCamelCase : Dict = {}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """x = 3\ny = 5"""
lowerCamelCase : Optional[int] = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """text = f'This is x: {x}.'"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5"""
lowerCamelCase : Tuple = {"""x""": 3}
lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} )
lowerCamelCase : Tuple = {"""x""": 8}
lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = """test_list = [x, add_two(x)]"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """y = x"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]"""
lowerCamelCase : Any = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
lowerCamelCase : Dict = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i"""
lowerCamelCase : int = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
| 681 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_lowerCamelCase ="""Create a default config file for Accelerate with only a few flags set."""
def _a ( lowerCamelCase="no", lowerCamelCase = default_json_config_file, lowerCamelCase = False ):
lowerCamelCase : List[Any] = Path(lowerCamelCase )
path.parent.mkdir(parents=lowerCamelCase, exist_ok=lowerCamelCase )
if path.exists():
print(
F'''Configuration already exists at {save_location}; will not overwrite it. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
lowerCamelCase : Union[str, Any] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
lowerCamelCase : Union[str, Any] = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
lowerCamelCase : Any = torch.cuda.device_count()
lowerCamelCase : Optional[Any] = num_gpus
lowerCamelCase : Optional[int] = False
if num_gpus > 1:
lowerCamelCase : Any = """MULTI_GPU"""
else:
lowerCamelCase : Optional[int] = """NO"""
elif is_xpu_available() and use_xpu:
lowerCamelCase : str = torch.xpu.device_count()
lowerCamelCase : Tuple = num_xpus
lowerCamelCase : List[str] = False
if num_xpus > 1:
lowerCamelCase : str = """MULTI_XPU"""
else:
lowerCamelCase : List[Any] = """NO"""
elif is_npu_available():
lowerCamelCase : List[str] = torch.npu.device_count()
lowerCamelCase : Tuple = num_npus
lowerCamelCase : List[Any] = False
if num_npus > 1:
lowerCamelCase : Union[str, Any] = """MULTI_NPU"""
else:
lowerCamelCase : Any = """NO"""
else:
lowerCamelCase : int = 0
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : List[Any] = 1
lowerCamelCase : int = """NO"""
lowerCamelCase : Optional[int] = ClusterConfig(**lowerCamelCase )
config.to_json_file(lowerCamelCase )
return path
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[Any] = parser.add_parser("""default""", parents=lowerCamelCase, help=lowerCamelCase, formatter_class=lowerCamelCase )
parser.add_argument(
"""--config_file""", default=lowerCamelCase, help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
), dest="""save_location""", )
parser.add_argument(
"""--mixed_precision""", choices=["""no""", """fp16""", """bf16"""], type=lowerCamelCase, help="""Whether or not to use mixed precision training. """
"""Choose between FP16 and BF16 (bfloat16) training. """
"""BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""", default="""no""", )
parser.set_defaults(func=lowerCamelCase )
return parser
def _a ( lowerCamelCase ):
lowerCamelCase : Tuple = write_basic_config(args.mixed_precision, args.save_location )
if config_file:
print(F'''accelerate configuration saved at {config_file}''' )
| 681 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = """decision_transformer"""
_UpperCAmelCase : str = ["""past_key_values"""]
_UpperCAmelCase : Any = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
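# Defaults describe a small GPT-2-style backbone (3 layers, 1 head, hidden size 128)
# plus Decision-Transformer-specific fields: state_dim, act_dim, max_ep_len, action_tanh.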
def __init__( self , __magic_name__=1_7 , __magic_name__=4 , __magic_name__=1_2_8 , __magic_name__=4_0_9_6 , __magic_name__=True , __magic_name__=1 , __magic_name__=1_0_2_4 , __magic_name__=3 , __magic_name__=1 , __magic_name__=None , __magic_name__="relu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , __magic_name__=False , __magic_name__=False , **__magic_name__ , ):
lowerCamelCase : Optional[int] = state_dim
lowerCamelCase : int = act_dim
lowerCamelCase : int = hidden_size
lowerCamelCase : Union[str, Any] = max_ep_len
lowerCamelCase : Optional[int] = action_tanh
lowerCamelCase : Any = vocab_size
lowerCamelCase : List[str] = n_positions
lowerCamelCase : List[Any] = n_layer
lowerCamelCase : Dict = n_head
lowerCamelCase : Optional[Any] = n_inner
lowerCamelCase : Tuple = activation_function
lowerCamelCase : Tuple = resid_pdrop
lowerCamelCase : str = embd_pdrop
lowerCamelCase : Dict = attn_pdrop
lowerCamelCase : Tuple = layer_norm_epsilon
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Tuple = scale_attn_weights
lowerCamelCase : str = use_cache
lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx
lowerCamelCase : List[str] = reorder_and_upcast_attn
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : str = eos_token_id
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
| 681 | 1 |
from __future__ import annotations
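# Classic two-pointer two-sum: assumes `nums` is sorted in ascending order and returns
# the pair of indices whose values sum to `target`, or [] if none exists.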
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : int = 0
lowerCamelCase : Optional[Any] = len(lowerCamelCase ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowerCamelCase : Optional[int] = i + 1
else:
lowerCamelCase : Dict = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
| 681 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowerCamelCase =logging.get_logger(__name__)
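# Wrapper bundling a question-encoder tokenizer and a generator tokenizer; __call__ is
# dispatched to whichever one is currently selected as `current_tokenizer`.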
class A__ :
def __init__( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = question_encoder
lowerCamelCase : Dict = generator
lowerCamelCase : Tuple = self.question_encoder
def UpperCamelCase__ ( self , __magic_name__ ):
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" )
lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(__magic_name__ )
self.generator.save_pretrained(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ )
if config is None:
lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ )
lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
lowerCamelCase : Any = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=__magic_name__ , generator=__magic_name__ )
def __call__( self , *__magic_name__ , **__magic_name__ ):
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.question_encoder
def UpperCamelCase__ ( self ):
lowerCamelCase : str = self.generator
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , __magic_name__ , )
if max_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : int = self(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : Dict = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
lowerCamelCase : List[Any] = labels["""input_ids"""]
return model_inputs
| 681 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCamelCase =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCamelCase =""" \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) )
lowerCamelCase : Any = self.diffusers_dir
shutil.copy(
os.path.join(__magic_name__ , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
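# Write the candidate class into the scratch diffusers tree, format it with black, and
# check that `check_copies` either reports no inconsistency or can fix it in overwrite mode.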
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ):
lowerCamelCase : Tuple = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowerCamelCase : Any = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowerCamelCase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
lowerCamelCase : List[str] = black.format_str(__magic_name__ , mode=__magic_name__ )
lowerCamelCase : Optional[Any] = os.path.join(self.diffusers_dir , """new_code.py""" )
with open(__magic_name__ , """w""" , newline="""\n""" ) as f:
f.write(__magic_name__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__magic_name__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__magic_name__ )
with open(__magic_name__ , """r""" ) as f:
self.assertTrue(f.read() , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
# Base copy consistency
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , __magic_name__ , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , __magic_name__ ) , )
# Copy consistency with a really long name
lowerCamelCase : int = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("""Bert""" , __magic_name__ , __magic_name__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , __magic_name__ , overwrite_result=re.sub("""DDPM""" , """Test""" , __magic_name__ ) , )
| 681 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
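# Decode arbitrary audio bytes through an ffmpeg subprocess into a mono float32
# waveform resampled to `sampling_rate`.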
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[Any] = F'''{sampling_rate}'''
lowerCamelCase : Optional[int] = """1"""
lowerCamelCase : Any = """f32le"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
lowerCamelCase : Union[str, Any] = output_stream[0]
lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
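# Capture raw microphone audio via ffmpeg, selecting the platform backend
# (alsa on Linux, avfoundation on macOS, dshow on Windows), and yield fixed-size byte chunks.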
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ):
lowerCamelCase : Dict = F'''{sampling_rate}'''
lowerCamelCase : List[Any] = """1"""
if format_for_conversion == "s16le":
lowerCamelCase : Any = 2
elif format_for_conversion == "f32le":
lowerCamelCase : Dict = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase : Dict = platform.system()
if system == "Linux":
lowerCamelCase : Union[str, Any] = """alsa"""
lowerCamelCase : List[Any] = """default"""
elif system == "Darwin":
lowerCamelCase : List[Any] = """avfoundation"""
lowerCamelCase : List[Any] = """:0"""
elif system == "Windows":
lowerCamelCase : int = """dshow"""
lowerCamelCase : Any = """default"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase )
for item in iterator:
yield item
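# Live-microphone helper: re-chunk the raw stream with left/right strides so consumers
# see overlapping windows, and skip chunks once we fall more than ten chunks behind real time.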
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ):
if stream_chunk_s is not None:
lowerCamelCase : int = stream_chunk_s
else:
lowerCamelCase : Dict = chunk_length_s
lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase )
if format_for_conversion == "s16le":
lowerCamelCase : Optional[int] = np.intaa
lowerCamelCase : Optional[Any] = 2
elif format_for_conversion == "f32le":
lowerCamelCase : int = np.floataa
lowerCamelCase : Any = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowerCamelCase : Any = chunk_length_s / 6
lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCamelCase, (int, float) ):
lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s]
lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCamelCase : List[Any] = datetime.datetime.now()
lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase )
for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ):
# Put everything back in numpy scale
lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase )
lowerCamelCase : List[Any] = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
lowerCamelCase : Tuple = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're running behind real time, so skip this chunk
continue
yield item
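# Accumulate raw bytes and emit `chunk_len`-sized windows that overlap by
# stride_left + stride_right bytes; a final partial chunk is flushed at the end.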
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ):
lowerCamelCase : Optional[int] = B""""""
lowerCamelCase , lowerCamelCase : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowerCamelCase : str = 0
for raw in iterator:
acc += raw
if stream and len(lowerCamelCase ) < chunk_len:
lowerCamelCase : Optional[int] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
lowerCamelCase : str = (_stride_left, stride_right)
lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
lowerCamelCase : Optional[int] = False
yield item
lowerCamelCase : str = stride_left
lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCamelCase ) > stride_left:
lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
lowerCamelCase : List[Any] = False
yield item
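# Low-level reader: spawn the given ffmpeg command and yield its stdout in
# buffer-sized blocks until the stream ends.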
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Optional[int] = 2**24 # 16 MB
try:
with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process:
while True:
lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 681 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _a ( lowerCamelCase ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as are Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and are handled
# like all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
def _a ( lowerCamelCase ):
# Return 1 only when every character in the word is a CJK character, e.g. '身高' or '神' (but not '180').
for char in word:
lowerCamelCase : Tuple = ord(lowerCamelCase )
if not _is_chinese_char(lowerCamelCase ):
return 0
return 1
def _a ( lowerCamelCase ):
lowerCamelCase : Any = set()
for token in tokens:
lowerCamelCase : Dict = len(lowerCamelCase ) > 1 and is_chinese(lowerCamelCase )
if chinese_word:
word_set.add(lowerCamelCase )
lowerCamelCase : List[Any] = list(lowerCamelCase )
return word_list
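# Mark sub-characters of known Chinese words with the "##" prefix so that BERT-style
# whole-word masking treats the word as a single unit.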
def _a ( lowerCamelCase, lowerCamelCase ):
if not chinese_word_set:
return bert_tokens
lowerCamelCase : Union[str, Any] = max([len(lowerCamelCase ) for w in chinese_word_set] )
lowerCamelCase : List[str] = bert_tokens
lowerCamelCase , lowerCamelCase : Union[str, Any] = 0, len(lowerCamelCase )
while start < end:
lowerCamelCase : List[str] = True
if is_chinese(bert_word[start] ):
lowerCamelCase : Optional[Any] = min(end - start, lowerCamelCase )
for i in range(lowerCamelCase, 1, -1 ):
lowerCamelCase : Optional[int] = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
lowerCamelCase : Optional[int] = """##""" + bert_word[j]
lowerCamelCase : Any = start + i
lowerCamelCase : Dict = False
break
if single_word:
start += 1
return bert_word
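# Build the whole-word-masking reference ids: segment each line with LTP, tokenize it
# with BERT, and record the positions of "##"-prefixed Chinese sub-tokens.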
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : str = []
for i in range(0, len(lowerCamelCase ), 100 ):
lowerCamelCase : List[str] = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["""cws"""] ).cws
lowerCamelCase : Union[str, Any] = [get_chinese_word(lowerCamelCase ) for r in res]
ltp_res.extend(lowerCamelCase )
assert len(lowerCamelCase ) == len(lowerCamelCase )
lowerCamelCase : Union[str, Any] = []
for i in range(0, len(lowerCamelCase ), 100 ):
lowerCamelCase : str = bert_tokenizer(lines[i : i + 100], add_special_tokens=lowerCamelCase, truncation=lowerCamelCase, max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(lowerCamelCase ) == len(lowerCamelCase )
lowerCamelCase : List[Any] = []
for input_ids, chinese_word in zip(lowerCamelCase, lowerCamelCase ):
lowerCamelCase : str = []
for id in input_ids:
lowerCamelCase : Dict = bert_tokenizer._convert_id_to_token(lowerCamelCase )
input_tokens.append(lowerCamelCase )
lowerCamelCase : Optional[Any] = add_sub_symbol(lowerCamelCase, lowerCamelCase )
lowerCamelCase : List[Any] = []
# We only save the positions of Chinese subwords that start with "##", i.e. those that are part of a whole word.
for i, token in enumerate(lowerCamelCase ):
if token[:2] == "##":
lowerCamelCase : Dict = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase ) == 1 and _is_chinese_char(ord(lowerCamelCase ) ):
ref_id.append(lowerCamelCase )
ref_ids.append(lowerCamelCase )
assert len(lowerCamelCase ) == len(lowerCamelCase )
return ref_ids
def _a ( lowerCamelCase ):
# For Chinese (Ro)BERT(a), the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
# To fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp).
with open(args.file_name, """r""", encoding="""utf-8""" ) as f:
lowerCamelCase : Dict = f.readlines()
lowerCamelCase : Optional[Any] = [line.strip() for line in data if len(lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCamelCase : List[Any] = LTP(args.ltp ) # faster in GPU device
lowerCamelCase : Any = BertTokenizer.from_pretrained(args.bert )
lowerCamelCase : Tuple = prepare_ref(lowerCamelCase, lowerCamelCase, lowerCamelCase )
with open(args.save_path, """w""", encoding="""utf-8""" ) as f:
lowerCamelCase : Optional[int] = [json.dumps(lowerCamelCase ) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
_lowerCamelCase =parser.parse_args()
main(args)
| 681 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
])
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , )
assert hasattr(self , """env""" )
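# Build a HuggingFace SageMaker estimator configured for smdistributed model parallelism
# (4 partitions, 4 microbatches, 8 processes per host) on `instance_count` instances.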
def UpperCamelCase__ ( self , __magic_name__ ):
# configuration for running training on smdistributed Model Parallel
lowerCamelCase : Any = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCamelCase : Any = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 5_0_0,
} , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , )
def UpperCamelCase__ ( self , __magic_name__ ):
TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def UpperCamelCase__ ( self , __magic_name__ ):
# create estimator
lowerCamelCase : int = self.create_estimator(__magic_name__ )
# run training
estimator.fit()
# result dataframe
lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from the SageMaker job; this includes starting, preprocessing, and stopping
lowerCamelCase : int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
| 681 | 1 |
import copy
import random
from transformers import CLIPTokenizer
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , *__magic_name__ , **__magic_name__ ):
super().__init__(*__magic_name__ , **__magic_name__ )
lowerCamelCase : Dict = {}
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ):
lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
""" `placeholder_token` that is not already in the tokenizer.""" )
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ):
lowerCamelCase : List[Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
output.append(__magic_name__ )
else:
lowerCamelCase : Dict = []
for i in range(__magic_name__ ):
lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}'''
self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
output.append(__magic_name__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
F''' {placeholder_token}; keep placeholder tokens independent''' )
lowerCamelCase : Any = output
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ):
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[str] = []
for i in range(len(__magic_name__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowerCamelCase : List[str] = self.token_map[placeholder_token]
lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ )
random.shuffle(__magic_name__ )
lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) )
return text
def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
__magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
__magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
| 681 |
from __future__ import annotations
def _a ( lowerCamelCase ):
lowerCamelCase : Union[str, Any] = str(lowerCamelCase )
return n == n[::-1]
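# Likely Project Euler problem 36: sum the numbers below the limit that are palindromic
# in both base 10 and base 2.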
def _a ( lowerCamelCase = 100_0000 ):
lowerCamelCase : Any = 0
for i in range(1, lowerCamelCase ):
if is_palindrome(lowerCamelCase ) and is_palindrome(bin(lowerCamelCase ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 681 | 1 |
from __future__ import annotations
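# Open knight's tour by backtracking: from each square, try every legal knight move and
# recurse until all n*n squares are numbered.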
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase , lowerCamelCase : int = position
lowerCamelCase : str = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowerCamelCase : Any = []
for position in positions:
lowerCamelCase , lowerCamelCase : Union[str, Any] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(lowerCamelCase )
return permissible_positions
def _a ( lowerCamelCase ):
return not any(elem == 0 for row in board for elem in row )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if is_complete(lowerCamelCase ):
return True
for position in get_valid_pos(lowerCamelCase, len(lowerCamelCase ) ):
lowerCamelCase , lowerCamelCase : Dict = position
if board[y][x] == 0:
lowerCamelCase : Optional[Any] = curr + 1
if open_knight_tour_helper(lowerCamelCase, lowerCamelCase, curr + 1 ):
return True
lowerCamelCase : int = 0
return False
def _a ( lowerCamelCase ):
lowerCamelCase : Any = [[0 for i in range(lowerCamelCase )] for j in range(lowerCamelCase )]
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
lowerCamelCase : Optional[Any] = 1
if open_knight_tour_helper(lowerCamelCase, (i, j), 1 ):
return board
lowerCamelCase : Dict = 0
lowerCamelCase : Any = F'''Open Knight Tour cannot be performed on a board of size {n}'''
raise ValueError(lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
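# Map the original MSN/timm checkpoint parameter names onto the Hugging Face ViT naming scheme.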
def _a ( lowerCamelCase, lowerCamelCase=False ):
lowerCamelCase : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
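# The checkpoint stores attention as one fused qkv matrix; split it into the separate
# query/key/value weights and biases expected by the HF implementation.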
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[Any] = """"""
else:
lowerCamelCase : Optional[int] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size]
lowerCamelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : Any = in_proj_bias[-config.hidden_size :]
def _a ( lowerCamelCase ):
lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase ):
# The projection head is only used during self-supervised pre-training in MSN;
# it is not needed for downstream tasks.
lowerCamelCase : Any = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Dict = dct.pop(lowerCamelCase )
lowerCamelCase : str = val
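# Full conversion: infer the architecture from the checkpoint URL, strip the projection
# head, remap and load the weights, then sanity-check hidden states on a COCO test image.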
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Any = ViTMSNConfig()
lowerCamelCase : Tuple = 1000
lowerCamelCase : List[Any] = """datasets/huggingface/label-files"""
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase ), """r""" ) )
lowerCamelCase : List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase : Optional[int] = idalabel
lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCamelCase : int = 384
lowerCamelCase : Optional[int] = 1536
lowerCamelCase : Tuple = 6
elif "l16" in checkpoint_url:
lowerCamelCase : Dict = 1024
lowerCamelCase : List[Any] = 4096
lowerCamelCase : Optional[int] = 24
lowerCamelCase : str = 16
lowerCamelCase : str = 0.1
elif "b4" in checkpoint_url:
lowerCamelCase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowerCamelCase : Tuple = 7
lowerCamelCase : Optional[int] = 1024
lowerCamelCase : List[Any] = 4096
lowerCamelCase : Tuple = 24
lowerCamelCase : Dict = 16
lowerCamelCase : str = 0.1
lowerCamelCase : List[Any] = ViTMSNModel(lowerCamelCase )
lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase, map_location="""cpu""" )["""target_encoder"""]
lowerCamelCase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowerCamelCase )
lowerCamelCase : Dict = create_rename_keys(lowerCamelCase, base_model=lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase )
read_in_q_k_v(lowerCamelCase, lowerCamelCase, base_model=lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
lowerCamelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
lowerCamelCase : Union[str, Any] = ViTImageProcessor(
size=config.image_size, image_mean=lowerCamelCase, image_std=lowerCamelCase )
lowerCamelCase : Tuple = image_processor(images=lowerCamelCase, return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowerCamelCase : int = model(**lowerCamelCase )
lowerCamelCase : Union[str, Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCamelCase : Union[str, Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCamelCase : Tuple = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCamelCase : List[str] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCamelCase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCamelCase : List[str] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], lowerCamelCase, atol=1e-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCamelCase =parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 681 | 1 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_lowerCamelCase =get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_lowerCamelCase =get_tests_dir("""fixtures/vocab.json""")
_lowerCamelCase =get_tests_dir("""fixtures""")
class A__ ( unittest.TestCase):
_UpperCAmelCase : Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = 0
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : Tuple = WavaVecaConfig()
lowerCamelCase : Dict = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
lowerCamelCase : Dict = AutoProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) )
copyfile(__magic_name__ , os.path.join(__magic_name__ , """vocab.json""" ) )
lowerCamelCase : Tuple = AutoProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor()
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowerCamelCase : Union[str, Any] = WavaVecaProcessor(__magic_name__ , __magic_name__ )
# save in new folder
processor.save_pretrained(__magic_name__ )
# drop `processor_class` in tokenizer
with open(os.path.join(__magic_name__ , __magic_name__ ) , """r""" ) as f:
lowerCamelCase : List[Any] = json.load(__magic_name__ )
config_dict.pop("""processor_class""" )
with open(os.path.join(__magic_name__ , __magic_name__ ) , """w""" ) as f:
f.write(json.dumps(__magic_name__ ) )
lowerCamelCase : Any = AutoProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : List[str] = WavaVecaFeatureExtractor()
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowerCamelCase : Dict = WavaVecaProcessor(__magic_name__ , __magic_name__ )
# save in new folder
processor.save_pretrained(__magic_name__ )
# drop `processor_class` in feature extractor
with open(os.path.join(__magic_name__ , __magic_name__ ) , """r""" ) as f:
lowerCamelCase : Dict = json.load(__magic_name__ )
config_dict.pop("""processor_class""" )
with open(os.path.join(__magic_name__ , __magic_name__ ) , """w""" ) as f:
f.write(json.dumps(__magic_name__ ) )
lowerCamelCase : Tuple = AutoProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : Tuple = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(__magic_name__ )
# copy relevant files
copyfile(__magic_name__ , os.path.join(__magic_name__ , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(__magic_name__ , __magic_name__ ) , """w""" ) as f:
f.write("""{}""" )
lowerCamelCase : Dict = AutoProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__magic_name__ ):
lowerCamelCase : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__magic_name__ ):
lowerCamelCase : List[Any] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ )
lowerCamelCase : Optional[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
lowerCamelCase : List[str] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
lowerCamelCase : List[str] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowerCamelCase : Tuple = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
lowerCamelCase : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def UpperCamelCase__ ( self ):
try:
AutoConfig.register("""custom""" , __magic_name__ )
AutoFeatureExtractor.register(__magic_name__ , __magic_name__ )
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
AutoProcessor.register(__magic_name__ , __magic_name__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__magic_name__ ):
AutoProcessor.register(__magic_name__ , __magic_name__ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase : Any = CustomFeatureExtractor.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : Optional[Any] = os.path.join(__magic_name__ , """vocab.txt""" )
with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowerCamelCase : Union[str, Any] = CustomTokenizer(__magic_name__ )
lowerCamelCase : Dict = CustomProcessor(__magic_name__ , __magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__magic_name__ )
lowerCamelCase : List[str] = AutoProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self ):
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : int = False
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[str] = False
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """AutoFeatureExtractor"""
_UpperCAmelCase : Union[str, Any] = """AutoTokenizer"""
_UpperCAmelCase : List[str] = False
try:
AutoConfig.register("""custom""" , __magic_name__ )
AutoFeatureExtractor.register(__magic_name__ , __magic_name__ )
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
AutoProcessor.register(__magic_name__ , __magic_name__ )
# If remote code is not set, the default is to use local classes.
lowerCamelCase : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowerCamelCase : List[str] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowerCamelCase : List[str] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class A__ ( unittest.TestCase):
_UpperCAmelCase : Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def UpperCamelCase__ ( cls ):
lowerCamelCase : Tuple = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
lowerCamelCase : str = WavaVecaProcessor.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__magic_name__ , """test-processor""" ) , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : Dict = WavaVecaProcessor.from_pretrained(F'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__magic_name__ , getattr(new_processor.feature_extractor , __magic_name__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = WavaVecaProcessor.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__magic_name__ , """test-processor-org""" ) , push_to_hub=__magic_name__ , use_auth_token=self._token , organization="""valid_org""" , )
lowerCamelCase : int = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__magic_name__ , getattr(new_processor.feature_extractor , __magic_name__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCamelCase__ ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowerCamelCase : str = CustomFeatureExtractor.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : List[str] = os.path.join(__magic_name__ , """vocab.txt""" )
with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowerCamelCase : Union[str, Any] = CustomTokenizer(__magic_name__ )
lowerCamelCase : List[str] = CustomProcessor(__magic_name__ , __magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'''{USER}/test-dynamic-processor''' , token=self._token )
lowerCamelCase : Any = Repository(__magic_name__ , clone_from=F'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__magic_name__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) ) as f:
lowerCamelCase : List[Any] = json.load(__magic_name__ )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__magic_name__ , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__magic_name__ , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__magic_name__ , """custom_processing.py""" ) ) )
repo.push_to_hub()
lowerCamelCase : Dict = AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 681 |
def _a ( lowerCamelCase ):
if num < 0:
return False
lowerCamelCase : int = num
lowerCamelCase : int = 0
while num > 0:
lowerCamelCase : str = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
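# Minimal usage sketch (added for illustration; `_a` is the obfuscated name of the palindrome
# check above, which reverses the digits arithmetically and compares against the original):
assert _a(1_2_1)
assert not _a(1_2_3)
assert not _a(-1_2_1) # negative numbers are rejected before the digit reversal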
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : str = AltDiffusionPipeline
_UpperCAmelCase : Tuple = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
lowerCamelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
lowerCamelCase : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCamelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
lowerCamelCase : List[Any] = CLIPTextModel(__magic_name__ )
lowerCamelCase : List[Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowerCamelCase : Optional[int] = 7_7
lowerCamelCase : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=0 ):
if str(__magic_name__ ).startswith("""mps""" ):
lowerCamelCase : Union[str, Any] = torch.manual_seed(__magic_name__ )
else:
lowerCamelCase : Dict = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def UpperCamelCase__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Any = self.get_dummy_components()
torch.manual_seed(0 )
lowerCamelCase : Dict = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCamelCase : Any = RobertaSeriesModelWithTransformation(__magic_name__ )
lowerCamelCase : Dict = text_encoder
lowerCamelCase : Optional[int] = AltDiffusionPipeline(**__magic_name__ )
lowerCamelCase : Tuple = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : Optional[int] = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : Tuple = """A photo of an astronaut"""
lowerCamelCase : List[Any] = alt_pipe(**__magic_name__ )
lowerCamelCase : Optional[int] = output.images
lowerCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase : List[str] = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : List[Any] = self.get_dummy_components()
lowerCamelCase : str = PNDMScheduler(skip_prk_steps=__magic_name__ )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCamelCase : List[str] = RobertaSeriesModelWithTransformation(__magic_name__ )
lowerCamelCase : str = text_encoder
lowerCamelCase : Union[str, Any] = AltDiffusionPipeline(**__magic_name__ )
lowerCamelCase : Union[str, Any] = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : List[str] = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : List[Any] = alt_pipe(**__magic_name__ )
lowerCamelCase : List[str] = output.images
lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase : Tuple = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
# make sure here that pndm scheduler skips prk
lowerCamelCase : Any = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__magic_name__ )
lowerCamelCase : int = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : Dict = """A painting of a squirrel eating a burger"""
lowerCamelCase : Any = torch.manual_seed(0 )
lowerCamelCase : List[Any] = alt_pipe([prompt] , generator=__magic_name__ , guidance_scale=6.0 , num_inference_steps=2_0 , output_type="""np""" )
lowerCamelCase : Union[str, Any] = output.images
lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase : List[str] = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
lowerCamelCase : Tuple = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__magic_name__ , safety_checker=__magic_name__ )
lowerCamelCase : Optional[Any] = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
lowerCamelCase : int = torch.manual_seed(0 )
lowerCamelCase : Optional[int] = alt_pipe([prompt] , generator=__magic_name__ , num_inference_steps=2 , output_type="""numpy""" )
lowerCamelCase : List[Any] = output.images
lowerCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase : Optional[int] = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 681 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase ={
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
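# Illustrative note (not part of the original file): with the _LazyModule wiring above, importing
# the package stays cheap; the torch-backed classes listed in _import_structure are only
# materialised when an attribute is first accessed, e.g.
# from transformers import GPTNeoXJapaneseConfig # resolved lazily on first attribute access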
| 681 | 1 |
from ...configuration_utils import PretrainedConfig
_lowerCamelCase ={
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = """tapas"""
def __init__( self , __magic_name__=3_0_5_2_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1_0_2_4 , __magic_name__=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0] , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=0 , __magic_name__=10.0 , __magic_name__=0 , __magic_name__=1.0 , __magic_name__=None , __magic_name__=1.0 , __magic_name__=False , __magic_name__=None , __magic_name__=1.0 , __magic_name__=1.0 , __magic_name__=False , __magic_name__=False , __magic_name__="ratio" , __magic_name__=None , __magic_name__=None , __magic_name__=6_4 , __magic_name__=3_2 , __magic_name__=False , __magic_name__=True , __magic_name__=False , __magic_name__=False , __magic_name__=True , __magic_name__=False , __magic_name__=None , __magic_name__=None , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowerCamelCase : Union[str, Any] = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : List[Any] = num_attention_heads
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Optional[Any] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : List[str] = max_position_embeddings
lowerCamelCase : Optional[int] = type_vocab_sizes
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Tuple = layer_norm_eps
# Fine-tuning task hyperparameters
lowerCamelCase : List[str] = positive_label_weight
lowerCamelCase : List[Any] = num_aggregation_labels
lowerCamelCase : Optional[int] = aggregation_loss_weight
lowerCamelCase : Tuple = use_answer_as_supervision
lowerCamelCase : str = answer_loss_importance
lowerCamelCase : Any = use_normalized_answer_loss
lowerCamelCase : Any = huber_loss_delta
lowerCamelCase : Any = temperature
lowerCamelCase : Optional[int] = aggregation_temperature
lowerCamelCase : Union[str, Any] = use_gumbel_for_cells
lowerCamelCase : Dict = use_gumbel_for_aggregation
lowerCamelCase : List[Any] = average_approximation_function
lowerCamelCase : Tuple = cell_selection_preference
lowerCamelCase : Any = answer_loss_cutoff
lowerCamelCase : Optional[int] = max_num_rows
lowerCamelCase : Union[str, Any] = max_num_columns
lowerCamelCase : Dict = average_logits_per_cell
lowerCamelCase : Any = select_one_column
lowerCamelCase : str = allow_empty_column_selection
lowerCamelCase : Union[str, Any] = init_cell_selection_weights_to_zero
lowerCamelCase : int = reset_position_index_per_cell
lowerCamelCase : Any = disable_per_token_loss
# Aggregation hyperparameters
lowerCamelCase : int = aggregation_labels
lowerCamelCase : Union[str, Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels , __magic_name__ ):
lowerCamelCase : Dict = {int(__magic_name__ ): v for k, v in aggregation_labels.items()}
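# Minimal usage sketch (hedged: the class above is the obfuscated TapasConfig, and the WTQ-style
# label mapping below is illustrative rather than taken from this file):
# config = TapasConfig(num_aggregation_labels=4, aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"})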
| 681 |
import copy
import random
from transformers import CLIPTokenizer
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , *__magic_name__ , **__magic_name__ ):
super().__init__(*__magic_name__ , **__magic_name__ )
lowerCamelCase : Dict = {}
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ):
lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
""" `placeholder_token` that is not already in the tokenizer.""" )
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ):
lowerCamelCase : List[Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
output.append(__magic_name__ )
else:
lowerCamelCase : Dict = []
for i in range(__magic_name__ ):
lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}'''
self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
output.append(__magic_name__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
F''' {placeholder_token}. Keep placeholder tokens independent.''' )
lowerCamelCase : Any = output
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ):
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[str] = []
for i in range(len(__magic_name__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowerCamelCase : List[str] = self.token_map[placeholder_token]
lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ )
random.shuffle(__magic_name__ )
lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) )
return text
def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
__magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
__magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
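# Hedged usage sketch (assumes the class above is the multi-vector textual-inversion tokenizer and
# that the unobfuscated method name is `add_placeholder_tokens`; the checkpoint name is illustrative):
# tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
# tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4) # registers <cat-toy>_0 ... <cat-toy>_3
# ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)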
| 681 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = ["""image_processor""", """tokenizer"""]
_UpperCAmelCase : int = """OwlViTImageProcessor"""
_UpperCAmelCase : Optional[Any] = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , __magic_name__=None , __magic_name__=None , **__magic_name__ ):
lowerCamelCase : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
lowerCamelCase : int = kwargs.pop("""feature_extractor""" )
lowerCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
def __call__( self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__="max_length" , __magic_name__="np" , **__magic_name__ ):
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(__magic_name__ , __magic_name__ ) or (isinstance(__magic_name__ , __magic_name__ ) and not isinstance(text[0] , __magic_name__ )):
lowerCamelCase : List[str] = [self.tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )]
elif isinstance(__magic_name__ , __magic_name__ ) and isinstance(text[0] , __magic_name__ ):
lowerCamelCase : int = []
# Maximum number of queries across batch
lowerCamelCase : Tuple = max([len(__magic_name__ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__magic_name__ ) != max_num_queries:
lowerCamelCase : Any = t + [""" """] * (max_num_queries - len(__magic_name__ ))
lowerCamelCase : Union[str, Any] = self.tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
encodings.append(__magic_name__ )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
lowerCamelCase : Optional[Any] = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
lowerCamelCase : Union[str, Any] = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowerCamelCase : str = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
lowerCamelCase : Dict = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowerCamelCase : Optional[int] = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
lowerCamelCase : List[str] = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowerCamelCase : Optional[Any] = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
lowerCamelCase : str = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
lowerCamelCase : List[str] = BatchEncoding()
lowerCamelCase : List[str] = input_ids
lowerCamelCase : Optional[int] = attention_mask
if query_images is not None:
lowerCamelCase : Tuple = BatchEncoding()
lowerCamelCase : Union[str, Any] = self.image_processor(
__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ).pixel_values
lowerCamelCase : Tuple = query_pixel_values
if images is not None:
lowerCamelCase : str = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
lowerCamelCase : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowerCamelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.image_processor.post_process(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.image_processor.post_process_object_detection(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.image_processor.post_process_image_guided_detection(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
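# Hedged usage sketch (the checkpoint name and the `image` variable are illustrative, not from this file):
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
# `inputs` then carries input_ids, attention_mask and pixel_values, matching the __call__ logic above.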
| 681 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ):
lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Union[str, Any] = batch_size
lowerCamelCase : str = num_channels
lowerCamelCase : Any = image_size
lowerCamelCase : Optional[int] = min_resolution
lowerCamelCase : Union[str, Any] = max_resolution
lowerCamelCase : Union[str, Any] = do_resize
lowerCamelCase : int = size
lowerCamelCase : int = do_center_crop
lowerCamelCase : Union[str, Any] = crop_size
lowerCamelCase : Union[str, Any] = do_normalize
lowerCamelCase : Dict = image_mean
lowerCamelCase : Optional[Any] = image_std
lowerCamelCase : Union[str, Any] = do_convert_rgb
def UpperCamelCase__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowerCamelCase : Tuple = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
lowerCamelCase : Dict = []
for i in range(self.batch_size ):
lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ )
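# with do_convert_rgb enabled, 4-channel inputs are converted to RGB, hence 3 expected output channels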
lowerCamelCase : Any = 3
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 681 | 1 |
_lowerCamelCase ="""
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowerCamelCase =[{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowerCamelCase ={
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 681 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ):
lowerCamelCase : Tuple = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : List[Any] = image_size
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : Dict = embeddings_size
lowerCamelCase : Optional[int] = hidden_sizes
lowerCamelCase : Union[str, Any] = depths
lowerCamelCase : Optional[Any] = is_training
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Any = num_labels
lowerCamelCase : int = scope
lowerCamelCase : Optional[Any] = len(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ )
lowerCamelCase : Tuple = model(__magic_name__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = self.num_labels
lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ )
lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs
lowerCamelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase : List[str] = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Any = False
def UpperCamelCase__ ( self ):
lowerCamelCase : int = TFResNetModelTester(self )
lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def UpperCamelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__magic_name__ )
lowerCamelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Tuple = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = model_class(__magic_name__ )
lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Tuple = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase : Union[str, Any] = layer_type
lowerCamelCase : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
lowerCamelCase : int = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ):
lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCamelCase : List[str] = self.default_image_processor
lowerCamelCase : str = prepare_img()
lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" )
# forward pass
lowerCamelCase : Tuple = model(**__magic_name__ )
# verify the logits
lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
| 681 | 1 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class A__ ( nn.Module):
def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ):
super().__init__()
lowerCamelCase : Any = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCamelCase : Any = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCamelCase : List[Any] = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCamelCase : Optional[int] = [1, 0]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ):
lowerCamelCase : List[Any] = hidden_states
lowerCamelCase : Dict = []
lowerCamelCase : List[Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i]
lowerCamelCase : List[Any] = self.transformers[transformer_index](
__magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCamelCase : Dict = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__magic_name__ )
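# Illustrative note (not part of the original module): a pipeline can steer the blend at inference
# time by mutating the attributes initialised above, e.g.
# dual_transformer.mix_ratio = 0.7 # weight the first transformer's output more heavily
# dual_transformer.transformer_index_for_condition = [0, 1] # swap which branch encodes which condition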
| 681 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# Initialise PyTorch model
lowerCamelCase : str = MobileBertConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase : Tuple = MobileBertForPreTraining(lowerCamelCase )
# Load weights from tf checkpoint
lowerCamelCase : Tuple = load_tf_weights_in_mobilebert(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Save the PyTorch model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCamelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
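# Example invocation (hedged: the script name follows the usual transformers conversion-script
# convention and all paths are placeholders):
# python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./mobilebert/model.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./pytorch_model.bin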
| 681 | 1 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ):
lowerCamelCase , lowerCamelCase : int = coefficient_matrix.shape
lowerCamelCase , lowerCamelCase : List[str] = constant_matrix.shape
if rowsa != colsa:
lowerCamelCase : Optional[int] = F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
raise ValueError(lowerCamelCase )
if colsa != 1:
lowerCamelCase : List[str] = F'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
raise ValueError(lowerCamelCase )
if rowsa != rowsa:
lowerCamelCase : int = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
F'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
)
raise ValueError(lowerCamelCase )
if len(lowerCamelCase ) != rowsa:
lowerCamelCase : Optional[Any] = (
"""Number of initial values must be equal to number of rows in coefficient """
F'''matrix but received {len(lowerCamelCase )} and {rowsa}'''
)
raise ValueError(lowerCamelCase )
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""" )
lowerCamelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix), axis=1 )
lowerCamelCase , lowerCamelCase : Optional[int] = table.shape
strictly_diagonally_dominant(lowerCamelCase )
# Iterates the whole matrix for given number of times
for _ in range(lowerCamelCase ):
lowerCamelCase : str = []
for row in range(lowerCamelCase ):
lowerCamelCase : Dict = 0
for col in range(lowerCamelCase ):
if col == row:
lowerCamelCase : Any = table[row][col]
elif col == cols - 1:
lowerCamelCase : Any = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
lowerCamelCase : Optional[int] = (temp + val) / denom
new_val.append(lowerCamelCase )
lowerCamelCase : Optional[int] = new_val
return [float(lowerCamelCase ) for i in new_val]
def _a ( lowerCamelCase ):
lowerCamelCase , lowerCamelCase : int = table.shape
lowerCamelCase : List[str] = True
for i in range(0, lowerCamelCase ):
lowerCamelCase : Dict = 0
for j in range(0, cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
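The row above implements Jacobi iteration. Since its names are rewritten, here is the same update spelled out directly in NumPy on a small strictly diagonally dominant system (example values chosen for illustration):
import numpy as np

# Jacobi update: x_i^(k+1) = (b_i - sum_{j != i} A_ij * x_j^(k)) / A_ii
A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
b = np.array([2.0, -6.0, -4.0])
x = np.zeros(3)
for _ in range(100):
    x_new = np.empty_like(x)
    for i in range(3):
        off_diag = A[i] @ x - A[i, i] * x[i]   # sum over j != i
        x_new[i] = (b[i] - off_diag) / A[i, i]
    x = x_new
print(np.allclose(A @ x, b))  # True: the iteration has converged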
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _a ( lowerCamelCase ):
# vision encoder
if "img_encoder.pos_embed" in name:
lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" )
if "img_encoder.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" )
if "ln_1" in name:
lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" )
if "ln_2" in name:
lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" )
if "c_fc" in name:
lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" )
if "c_proj" in name:
lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" )
if "text_encoder" in name:
lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" )
if "ln_final" in name:
lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" )
if "img_projector.linear_out." in name:
lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" )
if "text_projector.linear_out" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" )
return name
def _a ( lowerCamelCase, lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : Any = key.split(""".""" )
lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] )
lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase : int = val[:dim, :]
lowerCamelCase : List[str] = val[dim : dim * 2, :]
lowerCamelCase : Dict = val[-dim:, :]
else:
lowerCamelCase : List[Any] = val[:dim]
lowerCamelCase : List[Any] = val[dim : dim * 2]
lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : str = key.split(""".""" )
lowerCamelCase : Optional[int] = int(key_split[3] )
lowerCamelCase : List[str] = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase : Optional[int] = val[:dim, :]
lowerCamelCase : Any = val[
dim : dim * 2, :
]
lowerCamelCase : Optional[Any] = val[-dim:, :]
else:
lowerCamelCase : Union[str, Any] = val[:dim]
lowerCamelCase : Optional[int] = val[dim : dim * 2]
lowerCamelCase : Union[str, Any] = val[-dim:]
else:
lowerCamelCase : List[Any] = rename_key(lowerCamelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase : Any = val.squeeze_()
else:
lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def _a ( ):
lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ):
lowerCamelCase : int = GroupViTConfig()
lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval()
lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""]
lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase )
lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0)
# verify result
lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowerCamelCase : int = prepare_img()
lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" )
with torch.no_grad():
lowerCamelCase : int = model(**lowerCamelCase )
if model_name == "groupvit-gcc-yfcc":
lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 )
processor.save_pretrained(lowerCamelCase )
model.save_pretrained(lowerCamelCase )
print("""Successfully saved processor and model to""", lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCamelCase, organization="""nielsr""" )
model.push_to_hub(lowerCamelCase, organization="""nielsr""" )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_lowerCamelCase =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 681 | 1 |
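The trickiest step in the state-dict conversion above is splitting fused attention projections. In isolation, and with a made-up hidden size, the slicing looks like this:
import torch

dim = 8                                     # illustrative hidden size
fused_weight = torch.randn(3 * dim, dim)    # stand-in for a qkv / in_proj weight
q_w = fused_weight[:dim, :]
k_w = fused_weight[dim : dim * 2, :]
v_w = fused_weight[-dim:, :]
assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)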
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class A__ :
def __init__( self , __magic_name__ , ):
lowerCamelCase : Dict = parent
lowerCamelCase : List[Any] = 1_3
lowerCamelCase : List[Any] = 7
lowerCamelCase : List[str] = True
lowerCamelCase : Any = True
lowerCamelCase : Optional[Any] = True
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : List[str] = True
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : List[str] = False
lowerCamelCase : int = False
lowerCamelCase : int = 2
lowerCamelCase : Union[str, Any] = 9_9
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : Any = 3_2
lowerCamelCase : Tuple = 2
lowerCamelCase : int = 4
lowerCamelCase : Union[str, Any] = 0.1
lowerCamelCase : str = 0.1
lowerCamelCase : List[str] = 5_1_2
lowerCamelCase : Any = 1_6
lowerCamelCase : str = 2
lowerCamelCase : Union[str, Any] = 0.02
lowerCamelCase : Any = 3
lowerCamelCase : Optional[int] = 4
lowerCamelCase : Optional[int] = """last"""
lowerCamelCase : int = True
lowerCamelCase : Optional[Any] = None
lowerCamelCase : Optional[int] = 0
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
lowerCamelCase : int = None
if self.use_input_lengths:
lowerCamelCase : str = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase : List[str] = None
if self.use_token_type_ids:
lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCamelCase : List[Any] = None
lowerCamelCase : Tuple = None
lowerCamelCase : List[Any] = None
if self.use_labels:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Dict = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
lowerCamelCase : str = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : Optional[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : List[Any] = TFFlaubertModel(config=__magic_name__ )
lowerCamelCase : Tuple = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
lowerCamelCase : List[str] = model(__magic_name__ )
lowerCamelCase : List[Any] = [input_ids, input_mask]
lowerCamelCase : Dict = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : int = TFFlaubertWithLMHeadModel(__magic_name__ )
lowerCamelCase : str = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
lowerCamelCase : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : Optional[int] = TFFlaubertForQuestionAnsweringSimple(__magic_name__ )
lowerCamelCase : Tuple = {"""input_ids""": input_ids, """lengths""": input_lengths}
lowerCamelCase : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : int = TFFlaubertForSequenceClassification(__magic_name__ )
lowerCamelCase : List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths}
lowerCamelCase : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : Optional[Any] = self.num_labels
lowerCamelCase : int = TFFlaubertForTokenClassification(config=__magic_name__ )
lowerCamelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase : List[str] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : Optional[int] = self.num_choices
lowerCamelCase : List[Any] = TFFlaubertForMultipleChoice(config=__magic_name__ )
lowerCamelCase : List[str] = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : List[Any] = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : List[str] = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : Tuple = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowerCamelCase : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
        ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) : Optional[Any] = config_and_inputs  # config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
lowerCamelCase : int = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : int = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCAmelCase : Optional[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCAmelCase : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : int = False
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = TFFlaubertModelTester(self )
lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , emb_dim=3_7 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Dict = TFFlaubertModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
lowerCamelCase : Union[str, Any] = tf.convert_to_tensor(
[[0, 1_5_8, 7_3_5, 2_5_9_2, 1_4_2_4, 6_7_2_7, 8_2, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
lowerCamelCase : Any = model(__magic_name__ )[0]
lowerCamelCase : Tuple = tf.TensorShape((1, 8, 5_1_2) )
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice.
lowerCamelCase : List[Any] = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 681 |
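A sketch of what the tester above exercises, outside the test harness: a tiny FlaubertConfig, a TF model, and one forward pass over random ids. Sizes mirror the tester's defaults, and a working TensorFlow install is assumed:
import tensorflow as tf
from transformers import FlaubertConfig, TFFlaubertModel

config = FlaubertConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4, n_langs=2)
model = TFFlaubertModel(config)
input_ids = tf.random.uniform((13, 7), maxval=99, dtype=tf.int32)
outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # (13, 7, 32)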
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class A__ :
# setable values
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def UpperCamelCase__ ( cls ):
return cls()
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : KarrasVeSchedulerState
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
return True
@register_to_config
def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ):
pass
def UpperCamelCase__ ( self ):
return KarrasVeSchedulerState.create()
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ):
lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy()
lowerCamelCase : int = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase : Dict = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 )
lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape )
lowerCamelCase : List[Any] = sigma + gamma * sigma
lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output
lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : str = sample_prev + sigma_prev * model_output
lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
raise NotImplementedError()
| 681 | 1 |
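The set_timesteps method above builds a geometric schedule over squared sigmas. Written out plainly, with the constants matching the scheduler's defaults in this row:
import jax.numpy as jnp

sigma_min, sigma_max, num_steps = 0.02, 100.0, 50
i = jnp.arange(num_steps)
schedule = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_steps - 1))
print(float(schedule[0]), float(schedule[-1]))  # sigma_max**2 down to sigma_min**2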
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class A__ :
# setable values
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def UpperCamelCase__ ( cls ):
return cls()
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : KarrasVeSchedulerState
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
return True
@register_to_config
def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ):
pass
def UpperCamelCase__ ( self ):
return KarrasVeSchedulerState.create()
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ):
lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy()
lowerCamelCase : int = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase : Dict = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 )
lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape )
lowerCamelCase : List[Any] = sigma + gamma * sigma
lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output
lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : str = sample_prev + sigma_prev * model_output
lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
raise NotImplementedError()
| 681 |
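This row repeats the scheduler, which also contains the stochastic "churn" step: perturb the sample and raise sigma before the ODE step. Isolated, with illustrative values in place of the config:
import jax
import jax.numpy as jnp

key = jax.random.PRNGKey(0)
sample = jnp.zeros((1, 4, 8, 8))
sigma, gamma, s_noise = 10.0, 0.05, 1.007                # illustrative values
key, subkey = jax.random.split(key)
eps = s_noise * jax.random.normal(subkey, sample.shape)  # eps ~ N(0, S_noise^2 I)
sigma_hat = sigma + gamma * sigma
sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5) * eps
print(float(sigma_hat), sample_hat.shape)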
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[str] = k_size // 2
lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCamelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) )
return g
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1]
# dst image height and width
lowerCamelCase : Dict = height - k_size + 1
lowerCamelCase : str = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
lowerCamelCase : List[Any] = 0
for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ):
lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] )
lowerCamelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase )
lowerCamelCase : str = ravel(lowerCamelCase )
# reshape and get the dst image
lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(lowerCamelCase )
return dst
if __name__ == "__main__":
# read original image
_lowerCamelCase =imread(R"""../image_data/lena.jpg""")
# turn image in gray scale value
_lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_lowerCamelCase =gaussian_filter(gray, 3, sigma=1)
_lowerCamelCase =gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("""gaussian filter with 3x3 mask""", gaussianaxa)
imshow("""gaussian filter with 5x5 mask""", gaussianaxa)
waitKey()
| 681 | 1 |
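The filter above avoids an explicit 2-D convolution loop by flattening every k-by-k window into a row (im2col) and doing a single matrix-vector product. A self-contained NumPy version of the same idea, mirroring the snippet's (unnormalized) kernel formula:
import numpy as np

def gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = np.mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    return 1 / (2 * np.pi * sigma) * np.exp(-(x**2 + y**2) / (2 * sigma**2))

image = np.arange(25, dtype=np.float64).reshape(5, 5)
k = 3
windows = np.stack([
    image[i : i + k, j : j + k].ravel()           # im2col: one window per row
    for i in range(image.shape[0] - k + 1)
    for j in range(image.shape[1] - k + 1)
])
filtered = (windows @ gaussian_kernel(k, sigma=1).ravel()).reshape(3, 3)
print(filtered.shape)  # (3, 3): the output shrinks by k - 1 per axis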
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _a ( lowerCamelCase, lowerCamelCase=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def _a ( lowerCamelCase, lowerCamelCase=0 ):
lowerCamelCase : str = []
for old_item in old_list:
lowerCamelCase : int = old_item.replace("""in_layers.0""", """norm1""" )
lowerCamelCase : Optional[int] = new_item.replace("""in_layers.2""", """conv1""" )
lowerCamelCase : Optional[int] = new_item.replace("""out_layers.0""", """norm2""" )
lowerCamelCase : List[str] = new_item.replace("""out_layers.3""", """conv2""" )
lowerCamelCase : Optional[Any] = new_item.replace("""emb_layers.1""", """time_emb_proj""" )
lowerCamelCase : Optional[Any] = new_item.replace("""skip_connection""", """conv_shortcut""" )
lowerCamelCase : int = shave_segments(lowerCamelCase, n_shave_prefix_segments=lowerCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _a ( lowerCamelCase, lowerCamelCase=0 ):
lowerCamelCase : Tuple = []
for old_item in old_list:
lowerCamelCase : Optional[int] = old_item
lowerCamelCase : Optional[Any] = new_item.replace("""norm.weight""", """group_norm.weight""" )
lowerCamelCase : Optional[Any] = new_item.replace("""norm.bias""", """group_norm.bias""" )
lowerCamelCase : List[Any] = new_item.replace("""proj_out.weight""", """proj_attn.weight""" )
lowerCamelCase : Any = new_item.replace("""proj_out.bias""", """proj_attn.bias""" )
lowerCamelCase : Dict = shave_segments(lowerCamelCase, n_shave_prefix_segments=lowerCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None ):
assert isinstance(lowerCamelCase, lowerCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowerCamelCase : Dict = old_checkpoint[path]
lowerCamelCase : Any = old_tensor.shape[0] // 3
lowerCamelCase : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowerCamelCase : int = old_tensor.shape[0] // config["""num_head_channels"""] // 3
lowerCamelCase : List[Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = old_tensor.split(channels // num_heads, dim=1 )
lowerCamelCase : Optional[int] = query.reshape(lowerCamelCase )
lowerCamelCase : Tuple = key.reshape(lowerCamelCase )
lowerCamelCase : Union[str, Any] = value.reshape(lowerCamelCase )
for path in paths:
lowerCamelCase : Union[str, Any] = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowerCamelCase : Any = new_path.replace("""middle_block.0""", """mid_block.resnets.0""" )
lowerCamelCase : Optional[int] = new_path.replace("""middle_block.1""", """mid_block.attentions.0""" )
lowerCamelCase : Tuple = new_path.replace("""middle_block.2""", """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
lowerCamelCase : int = new_path.replace(replacement["""old"""], replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowerCamelCase : Optional[Any] = old_checkpoint[path["""old"""]][:, :, 0]
else:
lowerCamelCase : List[Any] = old_checkpoint[path["""old"""]]
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Any = {}
lowerCamelCase : Optional[Any] = checkpoint["""time_embed.0.weight"""]
lowerCamelCase : Any = checkpoint["""time_embed.0.bias"""]
lowerCamelCase : Tuple = checkpoint["""time_embed.2.weight"""]
lowerCamelCase : str = checkpoint["""time_embed.2.bias"""]
lowerCamelCase : Optional[Any] = checkpoint["""input_blocks.0.0.weight"""]
lowerCamelCase : Union[str, Any] = checkpoint["""input_blocks.0.0.bias"""]
lowerCamelCase : Dict = checkpoint["""out.0.weight"""]
lowerCamelCase : int = checkpoint["""out.0.bias"""]
lowerCamelCase : List[str] = checkpoint["""out.2.weight"""]
lowerCamelCase : Optional[int] = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
lowerCamelCase : Union[str, Any] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
lowerCamelCase : List[Any] = {
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(lowerCamelCase )
}
# Retrieves the keys for the middle blocks only
lowerCamelCase : Any = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
lowerCamelCase : int = {
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(lowerCamelCase )
}
# Retrieves the keys for the output blocks only
lowerCamelCase : List[Any] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
lowerCamelCase : Optional[int] = {
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(lowerCamelCase )
}
for i in range(1, lowerCamelCase ):
lowerCamelCase : str = (i - 1) // (config["""num_res_blocks"""] + 1)
lowerCamelCase : List[str] = (i - 1) % (config["""num_res_blocks"""] + 1)
lowerCamelCase : Any = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
lowerCamelCase : Optional[int] = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
lowerCamelCase : List[str] = checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
lowerCamelCase : int = checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
lowerCamelCase : List[str] = renew_resnet_paths(lowerCamelCase )
lowerCamelCase : Tuple = {"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
lowerCamelCase : List[Any] = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
lowerCamelCase, lowerCamelCase, lowerCamelCase, additional_replacements=[meta_path, resnet_op], config=lowerCamelCase )
if len(lowerCamelCase ):
lowerCamelCase : int = renew_attention_paths(lowerCamelCase )
lowerCamelCase : str = {
"""old""": F'''input_blocks.{i}.1''',
"""new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowerCamelCase : Dict = {
F'''input_blocks.{i}.1.qkv.bias''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
lowerCamelCase, lowerCamelCase, lowerCamelCase, additional_replacements=[meta_path], attention_paths_to_split=lowerCamelCase, config=lowerCamelCase, )
lowerCamelCase : int = middle_blocks[0]
lowerCamelCase : Dict = middle_blocks[1]
lowerCamelCase : Optional[Any] = middle_blocks[2]
lowerCamelCase : Any = renew_resnet_paths(lowerCamelCase )
assign_to_checkpoint(lowerCamelCase, lowerCamelCase, lowerCamelCase, config=lowerCamelCase )
lowerCamelCase : int = renew_resnet_paths(lowerCamelCase )
assign_to_checkpoint(lowerCamelCase, lowerCamelCase, lowerCamelCase, config=lowerCamelCase )
lowerCamelCase : List[str] = renew_attention_paths(lowerCamelCase )
lowerCamelCase : List[Any] = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
lowerCamelCase, lowerCamelCase, lowerCamelCase, attention_paths_to_split=lowerCamelCase, config=lowerCamelCase )
for i in range(lowerCamelCase ):
lowerCamelCase : Any = i // (config["""num_res_blocks"""] + 1)
lowerCamelCase : Optional[int] = i % (config["""num_res_blocks"""] + 1)
lowerCamelCase : Union[str, Any] = [shave_segments(lowerCamelCase, 2 ) for name in output_blocks[i]]
lowerCamelCase : int = {}
for layer in output_block_layers:
lowerCamelCase , lowerCamelCase : Optional[Any] = layer.split(""".""" )[0], shave_segments(lowerCamelCase, 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(lowerCamelCase )
else:
lowerCamelCase : str = [layer_name]
if len(lowerCamelCase ) > 1:
lowerCamelCase : Union[str, Any] = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
lowerCamelCase : Optional[int] = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
lowerCamelCase : Optional[Any] = renew_resnet_paths(lowerCamelCase )
lowerCamelCase : int = renew_resnet_paths(lowerCamelCase )
lowerCamelCase : Any = {"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(lowerCamelCase, lowerCamelCase, lowerCamelCase, additional_replacements=[meta_path], config=lowerCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowerCamelCase : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
lowerCamelCase : Tuple = checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
lowerCamelCase : Tuple = checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(lowerCamelCase ) == 2:
lowerCamelCase : List[str] = []
if len(lowerCamelCase ):
lowerCamelCase : str = renew_attention_paths(lowerCamelCase )
lowerCamelCase : Any = {
"""old""": F'''output_blocks.{i}.1''',
"""new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowerCamelCase : List[Any] = {
F'''output_blocks.{i}.1.qkv.bias''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
lowerCamelCase, lowerCamelCase, lowerCamelCase, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None, config=lowerCamelCase, )
else:
lowerCamelCase : int = renew_resnet_paths(lowerCamelCase, n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowerCamelCase : Dict = """.""".join(["""output_blocks""", str(lowerCamelCase ), path["""old"""]] )
lowerCamelCase : Any = """.""".join(["""up_blocks""", str(lowerCamelCase ), """resnets""", str(lowerCamelCase ), path["""new"""]] )
lowerCamelCase : int = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowerCamelCase =json.loads(f.read())
_lowerCamelCase =convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowerCamelCase =UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowerCamelCase =DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowerCamelCase =VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowerCamelCase =LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 681 |
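One small but easy-to-miss detail in the conversion above: attention output projections stored as 1x1 convolutions must be squeezed into linear weights. Isolated with a made-up size:
import torch

conv_weight = torch.randn(16, 16, 1)   # (out, in, 1) conv-1D kernel, stand-in
linear_weight = conv_weight[:, :, 0]   # drop the kernel dimension
assert linear_weight.shape == (16, 16)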
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = dataset_loading_script_name
lowerCamelCase : Dict = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase )
lowerCamelCase : str = script_dir / F'''{script_name}.py'''
with open(lowerCamelCase, """w""" ) as f:
f.write(lowerCamelCase )
return str(lowerCamelCase )
| 681 | 1 |
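A hedged sketch of how a test might consume the fixtures above: assuming the last fixture returns the path to the generated loading script, that path can be handed straight to datasets.load_dataset. The fixture name below is hypothetical (the names in this row are rewritten), and network access to the REPO_URL files is assumed:
from datasets import load_dataset

def test_dummy_dataset(dataset_loading_script_path):   # hypothetical fixture name
    ds = load_dataset(dataset_loading_script_path, split="train")
    assert "tokens" in ds.column_names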
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
_lowerCamelCase =None
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_lowerCamelCase ={
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase ={
"""google/bigbird-roberta-base""": 4_0_9_6,
"""google/bigbird-roberta-large""": 4_0_9_6,
"""google/bigbird-base-trivia-itc""": 4_0_9_6,
}
_lowerCamelCase ="""▁"""
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
_UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Dict = BigBirdTokenizer
_UpperCAmelCase : Any = ["""input_ids""", """attention_mask"""]
_UpperCAmelCase : List[int] = []
def __init__( self , __magic_name__=None , __magic_name__=None , __magic_name__="<unk>" , __magic_name__="<s>" , __magic_name__="</s>" , __magic_name__="<pad>" , __magic_name__="[SEP]" , __magic_name__="[MASK]" , __magic_name__="[CLS]" , **__magic_name__ , ):
lowerCamelCase : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else bos_token
lowerCamelCase : Any = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token
lowerCamelCase : Tuple = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token
lowerCamelCase : Tuple = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token
lowerCamelCase : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else cls_token
lowerCamelCase : Any = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase : List[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , **__magic_name__ , )
lowerCamelCase : List[Any] = vocab_file
lowerCamelCase : Any = False if not self.vocab_file else True
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : Optional[Any] = [self.sep_token_id]
lowerCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : List[str] = [self.sep_token_id]
lowerCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__magic_name__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : Any = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ):
copyfile(self.vocab_file , __magic_name__ )
return (out_vocab_file,)
| 681 |
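The three mask/type-id methods above all encode the same BERT-style pair layout: [CLS] A [SEP] B [SEP], with type id 0 for the first segment and 1 for the second. Spelled out with placeholder ids:
cls_id, sep_id = 65, 66                    # placeholder special-token ids
ids_a, ids_b = [10, 11], [20, 21, 22]
pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
special_mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]
type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
assert len(pair) == len(special_mask) == len(type_ids) == 8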
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
_lowerCamelCase ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
_lowerCamelCase ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[Any] = (images / 2 + 0.5).clamp(0, 1 )
lowerCamelCase : Optional[Any] = images.cpu().permute(0, 2, 3, 1 ).float().numpy()
lowerCamelCase : Any = numpy_to_pil(lowerCamelCase )
return images
def _a ( lowerCamelCase ):
if images.ndim == 3:
lowerCamelCase : Optional[Any] = images[None, ...]
lowerCamelCase : List[Any] = (images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCamelCase : Optional[int] = [Image.fromarray(image.squeeze(), mode="""L""" ) for image in images]
else:
lowerCamelCase : int = [Image.fromarray(lowerCamelCase ) for image in images]
return pil_images
| 681 | 1 |
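A quick exercise of the numpy-to-PIL path above, skipping the torch denormalization step and starting from channel-last floats already in [0, 1]:
import numpy as np
import PIL.Image

images = np.random.rand(2, 64, 64, 3)                 # NHWC floats in [0, 1]
as_uint8 = (images * 255).round().astype("uint8")
pil_images = [PIL.Image.fromarray(im) for im in as_uint8]
print(len(pil_images), pil_images[0].size)            # 2 (64, 64)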
from torch import nn
class A__ ( nn.Module):
def __init__( self , __magic_name__ , __magic_name__ ):
super().__init__()
lowerCamelCase : Dict = class_size
lowerCamelCase : Dict = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
lowerCamelCase : int = nn.Linear(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ ):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
lowerCamelCase : Union[str, Any] = self.mlp(__magic_name__ )
return logits
| 681 |
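The module above (note the commented-out two-layer MLP) reduces to a single linear probe from hidden states to class logits:
import torch
from torch import nn

head = nn.Linear(128, 4)            # embed_size=128, class_size=4 (illustrative)
hidden_state = torch.randn(3, 128)
logits = head(hidden_state)
print(logits.shape)                 # torch.Size([3, 4])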
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class A__ ( nn.Module):
def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ):
super().__init__()
lowerCamelCase : Any = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCamelCase : Any = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCamelCase : List[Any] = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCamelCase : Optional[int] = [1, 0]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ):
lowerCamelCase : List[Any] = hidden_states
lowerCamelCase : Dict = []
lowerCamelCase : List[Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i]
lowerCamelCase : List[Any] = self.transformers[transformer_index](
__magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCamelCase : Dict = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__magic_name__ )
| 681 | 1 |
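This row repeats the dual-transformer module in full, which also shows how the concatenated conditioning is cut back into per-branch chunks. That slicing, isolated with the 77/257 lengths used above:
import torch

condition_lengths = [77, 257]
encoder_hidden_states = torch.randn(2, sum(condition_lengths), 64)  # illustrative
tokens_start, chunks = 0, []
for length in condition_lengths:
    chunks.append(encoder_hidden_states[:, tokens_start : tokens_start + length])
    tokens_start += length
print([c.shape[1] for c in chunks])  # [77, 257]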
def _a ( lowerCamelCase ):
if num < 0:
return False
lowerCamelCase : int = num
lowerCamelCase : int = 0
while num > 0:
lowerCamelCase : str = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
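With readable names restored, the palindrome check above is simply digit reversal:
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    original, reversed_num = num, 0
    while num > 0:
        reversed_num = reversed_num * 10 + num % 10
        num //= 10
    return original == reversed_num

print(is_palindrome(121), is_palindrome(-121), is_palindrome(10))  # True False False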
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase ="""▁"""
_lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : str = BertGenerationTokenizer
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : List[Any] = True
def UpperCamelCase__ ( self ):
super().setUp()
lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """<s>"""
lowerCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(__magic_name__ ) , 1_0_0_2 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def big_tokenizer(self):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """Hello World!"""
lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : str = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCamelCase : str = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@require_torch
@slow
def UpperCamelCase__ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowerCamelCase : Dict = """ """.join(__magic_name__ )
lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : Tuple = BertGenerationConfig()
lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__magic_name__ )
model(**__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
# fmt: off
lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 681 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tapas"] = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_tapas"] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
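What the lazy structure buys, as a short usage sketch: importing the package is cheap, and the heavy submodules are resolved only on first attribute access (this assumes transformers and the relevant backend are installed):

from transformers.models.tapas import TapasConfig    # light: loads configuration_tapas only
# from transformers.models.tapas import TapasModel   # would pull in the torch-backed modeling_tapas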
| 681 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
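A hypothetical invocation of this script; the flag names come from the argparse dataclass above via HfArgumentParser, while the script name and values are illustrative:

# python initialize_model.py \
#     --tokenizer_name codeparrot/codeparrot \
#     --config_name gpt2-large \
#     --model_name my-new-model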
| 681 | 1 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
lowerCamelCase : List[Any] = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
    return [html_string_1, html_string_2]
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None
def UpperCamelCase__ ( self ):
self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
@property
def UpperCamelCase__ ( self ):
return self.feature_extract_tester.prepare_feat_extract_dict()
def UpperCamelCase__ ( self ):
# Initialize feature_extractor
lowerCamelCase : str = self.feature_extraction_class()
# Test not batched input
lowerCamelCase : Union[str, Any] = get_html_strings()[0]
lowerCamelCase : str = feature_extractor(__magic_name__ )
# fmt: off
lowerCamelCase : Any = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]]
lowerCamelCase : Tuple = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]]
# fmt: on
self.assertEqual(encoding.nodes , __magic_name__ )
self.assertEqual(encoding.xpaths , __magic_name__ )
# Test batched
lowerCamelCase : Any = get_html_strings()
lowerCamelCase : Dict = feature_extractor(__magic_name__ )
# fmt: off
lowerCamelCase : Optional[int] = expected_nodes + [["""My First Heading""", """My first paragraph."""]]
lowerCamelCase : Optional[Any] = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , __magic_name__ )
self.assertEqual(encoding.xpaths , __magic_name__ )
| 681 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class A__ ( unittest.TestCase):
def check_results_dict_not_empty(self, results):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """sshleifer/tiny-gpt2"""
lowerCamelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = """sgugger/tiny-distilbert-classification"""
lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , )
lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """sshleifer/tiny-gpt2"""
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """sshleifer/tiny-gpt2"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] )
lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , )
lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ )
benchmark.run()
self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(__magic_name__ ):
self.assertTrue(hasattr(__magic_name__ , """sequential""" ) )
self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) )
self.assertTrue(hasattr(__magic_name__ , """current""" ) )
self.assertTrue(hasattr(__magic_name__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
| 681 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCamelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
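Example invocation; the script name and paths are illustrative:

# python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin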
| 681 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """x = 3"""
lowerCamelCase : Tuple = {}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
lowerCamelCase : Optional[int] = """x = y"""
lowerCamelCase : Tuple = {"""y""": 5}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """y = add_two(x)"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """x = 3"""
lowerCamelCase : Dict = {}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """x = 3\ny = 5"""
lowerCamelCase : Optional[int] = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """text = f'This is x: {x}.'"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5"""
lowerCamelCase : Tuple = {"""x""": 3}
lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} )
lowerCamelCase : Tuple = {"""x""": 8}
lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = """test_list = [x, add_two(x)]"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """y = x"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]"""
lowerCamelCase : Any = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
lowerCamelCase : Dict = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i"""
lowerCamelCase : int = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
| 681 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
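A minimal concrete command built on the ABC above; the subcommand name and behavior are illustrative only, and `parser` is assumed to be an argparse subparsers action as in the CLI framework this mirrors:

class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # attach a "hello" subcommand and have it construct this command
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from the CLI")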
| 681 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
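Constructing the config uses the defaults in the signature above; the overrides shown here are illustrative:

config = DecisionTransformerConfig(state_dim=17, act_dim=4, max_ep_len=1000)
print(config.n_layer)  # 3, from the defaults above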
| 681 | 1 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowerCamelCase : Optional[int] = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowerCamelCase : Optional[Any] = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase : List[Any] = """TransientGlobalSelfAttention"""
else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']." )
# Encoder
for layer_index in range(config.num_layers ):
lowerCamelCase : Dict = F'''layers_{str(lowerCamelCase )}'''
# Self-Attention
lowerCamelCase : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
lowerCamelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
lowerCamelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
lowerCamelCase : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
lowerCamelCase : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
lowerCamelCase : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
lowerCamelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
lowerCamelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
lowerCamelCase : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
lowerCamelCase : Optional[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
lowerCamelCase : Union[str, Any] = flax_model.params["""encoder"""]["""block"""][str(lowerCamelCase )]["""layer"""]
lowerCamelCase : Any = tax_attention_key
lowerCamelCase : List[str] = tax_attention_out
lowerCamelCase : Tuple = tax_attention_query
lowerCamelCase : List[str] = tax_attention_value
lowerCamelCase : Optional[Any] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase : Union[str, Any] = tax_global_layer_norm
if split_mlp_wi:
lowerCamelCase : Optional[int] = tax_mlp_wi_a
lowerCamelCase : Union[str, Any] = tax_mlp_wi_a
else:
lowerCamelCase : Tuple = tax_mlp_wi
lowerCamelCase : List[str] = tax_mlp_wo
lowerCamelCase : Union[str, Any] = tax_mlp_layer_norm
lowerCamelCase : str = flax_model_encoder_layer_block
# Only for layer 0:
lowerCamelCase : Dict = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
lowerCamelCase : str = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase : Tuple = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
lowerCamelCase : Optional[int] = tax_encoder_global_rel_embedding
# Assigning
lowerCamelCase : List[str] = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
lowerCamelCase : int = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
lowerCamelCase : Optional[int] = F'''layers_{str(lowerCamelCase )}'''
# Self-Attention
lowerCamelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
lowerCamelCase : str = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
lowerCamelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
lowerCamelCase : str = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
lowerCamelCase : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
lowerCamelCase : Dict = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
lowerCamelCase : Optional[Any] = tax_enc_dec_attention_module["""key"""]["""kernel"""]
lowerCamelCase : str = tax_enc_dec_attention_module["""out"""]["""kernel"""]
lowerCamelCase : Union[str, Any] = tax_enc_dec_attention_module["""query"""]["""kernel"""]
lowerCamelCase : str = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
lowerCamelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
lowerCamelCase : Tuple = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
lowerCamelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
lowerCamelCase : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
lowerCamelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
lowerCamelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
lowerCamelCase : Tuple = flax_model.params["""decoder"""]["""block"""][str(lowerCamelCase )]["""layer"""]
lowerCamelCase : List[Any] = tax_attention_key
lowerCamelCase : int = tax_attention_out
lowerCamelCase : List[Any] = tax_attention_query
lowerCamelCase : Optional[Any] = tax_attention_value
lowerCamelCase : str = tax_pre_attention_layer_norm
lowerCamelCase : Optional[int] = tax_enc_dec_attention_key
lowerCamelCase : Dict = tax_enc_dec_attention_out
lowerCamelCase : List[Any] = tax_enc_dec_attention_query
lowerCamelCase : List[str] = tax_enc_dec_attention_value
lowerCamelCase : Dict = tax_cross_layer_norm
if split_mlp_wi:
lowerCamelCase : List[str] = tax_mlp_wi_a
lowerCamelCase : List[Any] = tax_mlp_wi_a
else:
lowerCamelCase : Dict = tax_mlp_wi
lowerCamelCase : Any = tax_mlp_wo
lowerCamelCase : str = tax_mlp_layer_norm
lowerCamelCase : List[Any] = flax_model_decoder_layer_block
# Decoder Normalization
lowerCamelCase : Any = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
lowerCamelCase : List[Any] = tax_decoder_norm
# Only for layer 0:
lowerCamelCase : int = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
lowerCamelCase : Optional[int] = tax_decoder_rel_embedding
# Token Embeddings
lowerCamelCase : Any = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
lowerCamelCase : Tuple = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowerCamelCase : int = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
    flax_model.save_pretrained(flax_dump_folder_path)
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
_lowerCamelCase =parser.parse_args()
convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
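Example invocation; the script name, checkpoint path, and model name are illustrative:

# python convert_t5x_checkpoint_to_flax.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#     --config_name google/long-t5-local-base \
#     --flax_dump_folder_path ./flax_dump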
| 681 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowerCamelCase =logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ):
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
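End-to-end sketch of the composite tokenizer; the model id is a real RAG checkpoint but is illustrative here, and it must ship the two tokenizer subfolders used above:

tok = RagTokenizer.from_pretrained("facebook/rag-token-nq")
enc = tok("who holds the record in 100m?", return_tensors="pt")  # question-encoder tokenizer
text = tok.batch_decode(enc["input_ids"])                        # generator tokenizer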
| 681 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ):
lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Union[str, Any] = batch_size
lowerCamelCase : str = num_channels
lowerCamelCase : Any = image_size
lowerCamelCase : Optional[int] = min_resolution
lowerCamelCase : Union[str, Any] = max_resolution
lowerCamelCase : Union[str, Any] = do_resize
lowerCamelCase : int = size
lowerCamelCase : int = do_center_crop
lowerCamelCase : Union[str, Any] = crop_size
lowerCamelCase : Union[str, Any] = do_normalize
lowerCamelCase : Dict = image_mean
lowerCamelCase : Optional[Any] = image_std
lowerCamelCase : Union[str, Any] = do_convert_rgb
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowerCamelCase : Tuple = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
lowerCamelCase : Dict = []
for i in range(self.batch_size ):
width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
self.expected_encoded_image_num_channels = 3
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 681 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload, sampling_rate):
    """Decode an audio byte payload with ffmpeg into a mono float32 numpy array."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ):
lowerCamelCase : Dict = F'''{sampling_rate}'''
lowerCamelCase : List[Any] = """1"""
if format_for_conversion == "s16le":
lowerCamelCase : Any = 2
elif format_for_conversion == "f32le":
lowerCamelCase : Dict = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase : Dict = platform.system()
if system == "Linux":
lowerCamelCase : Union[str, Any] = """alsa"""
lowerCamelCase : List[Any] = """default"""
elif system == "Darwin":
lowerCamelCase : List[Any] = """avfoundation"""
lowerCamelCase : List[Any] = """:0"""
elif system == "Windows":
lowerCamelCase : int = """dshow"""
lowerCamelCase : Any = """default"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase )
for item in iterator:
yield item
def ffmpeg_microphone_live(
    sampling_rate, chunk_length_s, stream_chunk_s=None, stride_length_s=None, format_for_conversion="f32le"
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len, stride, stream=False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len`, optionally overlapped by `stride`.
    `stream` makes it return partial results even if a full `chunk_len` is not yet available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen):
    bufsize = 2**24  # 16 MB
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 681 | 1 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A__ :
def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=3_0 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=3_2 , __magic_name__=5 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1_0 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=None , __magic_name__=2 , ):
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Any = batch_size
lowerCamelCase : str = image_size
lowerCamelCase : Any = patch_size
lowerCamelCase : Union[str, Any] = num_channels
lowerCamelCase : List[Any] = is_training
lowerCamelCase : str = use_labels
lowerCamelCase : int = hidden_size
lowerCamelCase : List[Any] = num_hidden_layers
lowerCamelCase : str = num_attention_heads
lowerCamelCase : Dict = intermediate_size
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : int = type_sequence_label_size
lowerCamelCase : int = initializer_range
lowerCamelCase : List[str] = scope
lowerCamelCase : List[Any] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCamelCase : Optional[int] = (image_size // patch_size) ** 2
lowerCamelCase : int = num_patches + 2
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Tuple = DeiTModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Tuple = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Dict = DeiTForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Union[str, Any] = model(__magic_name__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : Optional[int] = 1
lowerCamelCase : Optional[int] = DeiTForMaskedImageModeling(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : List[str] = model(__magic_name__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : List[str] = self.type_sequence_label_size
lowerCamelCase : List[str] = DeiTForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Optional[Any] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : Any = 1
lowerCamelCase : str = DeiTForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Tuple = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = config_and_inputs
lowerCamelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Optional[Any] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : Union[str, Any] = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : List[Any] = False
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = DeiTModelTester(self )
lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=3_7 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__magic_name__ )
lowerCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : int = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
lowerCamelCase : List[str] = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ):
if not self.model_tester.is_training:
return
lowerCamelCase , lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Any = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__magic_name__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowerCamelCase : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
lowerCamelCase : Optional[int] = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
lowerCamelCase : Union[str, Any] = model(**__magic_name__ ).loss
loss.backward()
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCamelCase : Dict = False
lowerCamelCase : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(__magic_name__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowerCamelCase : int = model_class(__magic_name__ )
model.gradient_checkpointing_enable()
model.to(__magic_name__ )
model.train()
lowerCamelCase : Union[str, Any] = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
lowerCamelCase : Optional[int] = model(**__magic_name__ ).loss
loss.backward()
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Optional[Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__magic_name__ ),
*get_values(__magic_name__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
lowerCamelCase : Union[str, Any] = problem_type["""title"""]
lowerCamelCase : Union[str, Any] = problem_type["""num_labels"""]
lowerCamelCase : List[str] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
lowerCamelCase : Any = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
if problem_type["num_labels"] > 1:
lowerCamelCase : Union[str, Any] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowerCamelCase : int = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the PyTorch warning "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__magic_name__ ) as warning_list:
lowerCamelCase : Tuple = model(**__magic_name__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCamelCase__ ( self ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Any = DeiTModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
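# Helper for the integration tests below: loads the standard COCO image fixture.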
def _a ( ):
lowerCamelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
__magic_name__ )
lowerCamelCase : Optional[int] = self.default_image_processor
lowerCamelCase : Any = prepare_img()
lowerCamelCase : List[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
lowerCamelCase : Optional[int] = model(**__magic_name__ )
# verify the logits
lowerCamelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
lowerCamelCase : Union[str, Any] = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowerCamelCase : int = self.default_image_processor
lowerCamelCase : Tuple = prepare_img()
lowerCamelCase : Union[str, Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" )
lowerCamelCase : List[str] = inputs.pixel_values.to(__magic_name__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowerCamelCase : Union[str, Any] = model(__magic_name__ )
| 681 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
])
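# Launches a real SageMaker training job with smdistributed model parallelism and
# asserts on the metrics the job reports back.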
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , )
assert hasattr(self , """env""" )
def UpperCamelCase__ ( self , __magic_name__ ):
# configuration for running training on smdistributed Model Parallel
lowerCamelCase : Any = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCamelCase : Any = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 5_0_0,
} , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , )
def UpperCamelCase__ ( self , __magic_name__ ):
TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def UpperCamelCase__ ( self , __magic_name__ ):
# create estimator
lowerCamelCase : int = self.create_estimator(__magic_name__ )
# run training
estimator.fit()
# result dataframe
lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCamelCase : int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
| 681 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
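# LUKE configuration: extends a BERT-style config with an entity vocabulary,
# an entity embedding size, and an entity-aware attention flag.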
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """luke"""
def __init__( self , __magic_name__=5_0_2_6_7 , __magic_name__=5_0_0_0_0_0 , __magic_name__=7_6_8 , __magic_name__=2_5_6 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=True , __magic_name__=None , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : List[str] = entity_vocab_size
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : str = entity_emb_size
lowerCamelCase : str = num_hidden_layers
lowerCamelCase : Any = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Tuple = intermediate_size
lowerCamelCase : Union[str, Any] = hidden_dropout_prob
lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : List[Any] = type_vocab_size
lowerCamelCase : str = initializer_range
lowerCamelCase : Optional[Any] = layer_norm_eps
lowerCamelCase : str = use_entity_aware_attention
lowerCamelCase : int = classifier_dropout
| 681 |
from __future__ import annotations
def _a ( lowerCamelCase ):
lowerCamelCase : Union[str, Any] = str(lowerCamelCase )
return n == n[::-1]
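# Project Euler 36: sum every number below the limit that is palindromic in both base 10 and base 2.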
def _a ( lowerCamelCase = 100_0000 ):
lowerCamelCase : Any = 0
for i in range(1, lowerCamelCase ):
if is_palindrome(lowerCamelCase ) and is_palindrome(bin(lowerCamelCase ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 681 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCamelCase =logging.getLogger(__name__)
_lowerCamelCase =list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_lowerCamelCase =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A__ :
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(__SCREAMING_SNAKE_CASE)} , )
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class A__ :
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The input training data file (a text file)."""})
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} , )
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
_UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
_UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""})
_UpperCAmelCase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether ot not to use whole word mask."""})
_UpperCAmelCase : float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""})
_UpperCAmelCase : float = field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
_UpperCAmelCase : int = field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""})
_UpperCAmelCase : int = field(
default=-1 , metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} , )
_UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
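# Build a train or eval dataset from the CLI arguments: line-by-line datasets
# (optionally with whole-word-mask reference files) or contiguous TextDatasets,
# concatenating multiple glob-matched training files when several are given.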
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = False, lowerCamelCase = None, ):
def _dataset(lowerCamelCase, lowerCamelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase, file_path=lowerCamelCase, block_size=args.block_size, ref_path=lowerCamelCase, )
return LineByLineTextDataset(tokenizer=lowerCamelCase, file_path=lowerCamelCase, block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase, file_path=lowerCamelCase, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=lowerCamelCase, )
if evaluate:
return _dataset(args.eval_data_file, args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file, args.train_ref_file )
def _a ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""", lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
lowerCamelCase : Dict = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCamelCase : Dict = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir )
else:
lowerCamelCase : int = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCamelCase : int = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
lowerCamelCase : List[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=lowerCamelCase, cache_dir=model_args.cache_dir, )
else:
logger.info("""Training new model from scratch""" )
lowerCamelCase : int = AutoModelWithLMHead.from_config(lowerCamelCase )
model.resize_token_embeddings(len(lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
lowerCamelCase : Union[str, Any] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowerCamelCase : Dict = min(data_args.block_size, tokenizer.max_len )
# Get datasets
lowerCamelCase : Dict = (
get_dataset(lowerCamelCase, tokenizer=lowerCamelCase, cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
lowerCamelCase : List[str] = (
get_dataset(lowerCamelCase, tokenizer=lowerCamelCase, evaluate=lowerCamelCase, cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
lowerCamelCase : List[str] = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
else:
if data_args.mlm and data_args.whole_word_mask:
lowerCamelCase : int = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase, mlm_probability=data_args.mlm_probability )
else:
lowerCamelCase : List[Any] = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCamelCase : int = Trainer(
model=lowerCamelCase, args=lowerCamelCase, data_collator=lowerCamelCase, train_dataset=lowerCamelCase, eval_dataset=lowerCamelCase, prediction_loss_only=lowerCamelCase, )
# Training
if training_args.do_train:
lowerCamelCase : Union[str, Any] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCamelCase : Union[str, Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCamelCase : List[str] = trainer.evaluate()
lowerCamelCase : List[Any] = math.exp(eval_output["""eval_loss"""] )
lowerCamelCase : Dict = {"""perplexity""": perplexity}
lowerCamelCase : Optional[Any] = os.path.join(training_args.output_dir, """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase, """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""", lowerCamelCase, str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowerCamelCase )
return results
def _a ( lowerCamelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 681 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _a ( lowerCamelCase, lowerCamelCase=False ):
lowerCamelCase : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
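# Split each fused qkv projection from the original checkpoint into the separate
# query/key/value weights and biases expected by the HF ViT attention layers.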
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[Any] = """"""
else:
lowerCamelCase : Optional[int] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size]
lowerCamelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : Any = in_proj_bias[-config.hidden_size :]
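# Drop the classification head weights so the checkpoint can load into a headless base model.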
def _a ( lowerCamelCase ):
lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase ):
    # The projection head is used in the self-supervised pre-training in MSN;
    # it is not needed for downstream tasks.
lowerCamelCase : Any = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Dict = dct.pop(lowerCamelCase )
lowerCamelCase : str = val
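# End-to-end conversion: infer the ViT-MSN config from the checkpoint URL, remap and
# load the weights, then verify the hidden states against reference values on a test image.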
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Any = ViTMSNConfig()
lowerCamelCase : Tuple = 1000
lowerCamelCase : List[Any] = """datasets/huggingface/label-files"""
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase ), """r""" ) )
lowerCamelCase : List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase : Optional[int] = idalabel
lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCamelCase : int = 384
lowerCamelCase : Optional[int] = 1536
lowerCamelCase : Tuple = 6
elif "l16" in checkpoint_url:
lowerCamelCase : Dict = 1024
lowerCamelCase : List[Any] = 4096
lowerCamelCase : Optional[int] = 24
lowerCamelCase : str = 16
lowerCamelCase : str = 0.1
elif "b4" in checkpoint_url:
lowerCamelCase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowerCamelCase : Tuple = 7
lowerCamelCase : Optional[int] = 1024
lowerCamelCase : List[Any] = 4096
lowerCamelCase : Tuple = 24
lowerCamelCase : Dict = 16
lowerCamelCase : str = 0.1
lowerCamelCase : List[Any] = ViTMSNModel(lowerCamelCase )
lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase, map_location="""cpu""" )["""target_encoder"""]
lowerCamelCase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowerCamelCase )
lowerCamelCase : Dict = create_rename_keys(lowerCamelCase, base_model=lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase )
read_in_q_k_v(lowerCamelCase, lowerCamelCase, base_model=lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
lowerCamelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
lowerCamelCase : Union[str, Any] = ViTImageProcessor(
size=config.image_size, image_mean=lowerCamelCase, image_std=lowerCamelCase )
lowerCamelCase : Tuple = image_processor(images=lowerCamelCase, return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowerCamelCase : int = model(**lowerCamelCase )
lowerCamelCase : Union[str, Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCamelCase : Union[str, Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCamelCase : Tuple = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCamelCase : List[str] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCamelCase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCamelCase : List[str] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], lowerCamelCase, atol=1e-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCamelCase =parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 681 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase ="""\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
_lowerCamelCase ="""\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
_lowerCamelCase ="""
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
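    # Score each (reference, prediction) pair with rouge_score; with use_aggregator,
    # bootstrap-aggregate the per-pair scores into low/mid/high confidence intervals.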
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=True , __magic_name__=False ):
if rouge_types is None:
lowerCamelCase : List[Any] = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]
lowerCamelCase : str = rouge_scorer.RougeScorer(rouge_types=__magic_name__ , use_stemmer=__magic_name__ )
if use_aggregator:
lowerCamelCase : Any = scoring.BootstrapAggregator()
else:
lowerCamelCase : List[Any] = []
for ref, pred in zip(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = scorer.score(__magic_name__ , __magic_name__ )
if use_aggregator:
aggregator.add_scores(__magic_name__ )
else:
scores.append(__magic_name__ )
if use_aggregator:
lowerCamelCase : Optional[int] = aggregator.aggregate()
else:
lowerCamelCase : int = {}
for key in scores[0]:
lowerCamelCase : Tuple = [score[key] for score in scores]
return result
| 681 |
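# Check whether a non-negative integer is a palindrome by reversing its decimal digits arithmetically.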
def _a ( lowerCamelCase ):
if num < 0:
return False
lowerCamelCase : int = num
lowerCamelCase : int = 0
while num > 0:
lowerCamelCase : str = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
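# Interpolation search: instead of always probing the midpoint, estimate the probe
# index from where the target value falls between the endpoint values; requires an
# ascending sorted collection.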
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = 0
lowerCamelCase : Any = len(lowerCamelCase ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
lowerCamelCase : List[Any] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowerCamelCase ):
return None
lowerCamelCase : Optional[Any] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
lowerCamelCase : Optional[Any] = left
lowerCamelCase : List[Any] = point
elif point > right:
lowerCamelCase : Tuple = right
lowerCamelCase : List[Any] = point
else:
if item < current_item:
lowerCamelCase : Dict = point - 1
else:
lowerCamelCase : Union[str, Any] = point + 1
return None
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
lowerCamelCase : Dict = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowerCamelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
elif point > right:
return interpolation_search_by_recursion(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
lowerCamelCase, lowerCamelCase, lowerCamelCase, point - 1 )
else:
return interpolation_search_by_recursion(
lowerCamelCase, lowerCamelCase, point + 1, lowerCamelCase )
def _a ( lowerCamelCase ):
if collection != sorted(lowerCamelCase ):
raise ValueError("""Collection must be ascending sorted""" )
return True
if __name__ == "__main__":
import sys
_lowerCamelCase =0
if debug == 1:
_lowerCamelCase =[1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
_lowerCamelCase =6_7
_lowerCamelCase =interpolation_search(collection, target)
if result is not None:
print(f'''{target} found at positions: {result}''')
else:
print("""Not found""")
| 681 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase ={
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 681 | 1 |
from math import ceil
def _a ( lowerCamelCase = 1001 ):
lowerCamelCase : Union[str, Any] = 1
for i in range(1, int(ceil(n / 2.0 ) ) ):
lowerCamelCase : Any = 2 * i + 1
lowerCamelCase : Any = 2 * i
lowerCamelCase : str = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
_lowerCamelCase =int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 681 |
import copy
import random
from transformers import CLIPTokenizer
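# CLIPTokenizer subclass that expands a single placeholder token into several
# sub-tokens (multi-vector textual inversion) and rewrites prompts accordingly.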
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , *__magic_name__ , **__magic_name__ ):
super().__init__(*__magic_name__ , **__magic_name__ )
lowerCamelCase : Dict = {}
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ):
lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
""" `placeholder_token` that is not already in the tokenizer.""" )
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ):
lowerCamelCase : List[Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
output.append(__magic_name__ )
else:
lowerCamelCase : Dict = []
for i in range(__magic_name__ ):
lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}'''
self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
output.append(__magic_name__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
                    F''' {placeholder_token}; keep placeholder tokens independent.''' )
lowerCamelCase : Any = output
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ):
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[str] = []
for i in range(len(__magic_name__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowerCamelCase : List[str] = self.token_map[placeholder_token]
lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ )
random.shuffle(__magic_name__ )
lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) )
return text
def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
__magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
__magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
| 681 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase ={
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 681 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
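# Prepares the processor kwargs and random PIL/numpy/torch images used by the
# ChineseCLIP image-processing tests below.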
class A__ ( unittest.TestCase):
def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ):
lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Union[str, Any] = batch_size
lowerCamelCase : str = num_channels
lowerCamelCase : Any = image_size
lowerCamelCase : Optional[int] = min_resolution
lowerCamelCase : Union[str, Any] = max_resolution
lowerCamelCase : Union[str, Any] = do_resize
lowerCamelCase : int = size
lowerCamelCase : int = do_center_crop
lowerCamelCase : Union[str, Any] = crop_size
lowerCamelCase : Union[str, Any] = do_normalize
lowerCamelCase : Dict = image_mean
lowerCamelCase : Optional[Any] = image_std
lowerCamelCase : Union[str, Any] = do_convert_rgb
def UpperCamelCase__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowerCamelCase : Tuple = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
lowerCamelCase : Dict = []
for i in range(self.batch_size ):
lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
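# Same suite with 4-channel inputs: verifies that do_convert_rgb collapses the
# encoded output back to 3 channels.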
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ )
lowerCamelCase : Any = 3
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 681 | 1 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowerCamelCase ={
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
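# sanity_checks(args): validate mutually dependent CLI options (MLM vs. CLM,
# student/teacher pairing, loss weights) before any heavy lifting starts.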
def _a ( lowerCamelCase ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
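# freeze_pos_embeddings(student, args): stop gradient updates to the student's
# positional embeddings (RoBERTa and GPT-2 students).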
def _a ( lowerCamelCase, lowerCamelCase ):
if args.student_type == "roberta":
lowerCamelCase : Union[str, Any] = False
elif args.student_type == "gpt2":
lowerCamelCase : Optional[int] = False
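# freeze_token_type_embeddings(student, args): likewise freeze the token-type
# embeddings (RoBERTa students only).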
def _a ( lowerCamelCase, lowerCamelCase ):
if args.student_type == "roberta":
lowerCamelCase : str = False
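# main(): parse CLI arguments, load the teacher/student models and the binarized
# dataset, then hand everything to the Distiller.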
def _a ( ):
lowerCamelCase : Optional[int] = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""", action="""store_true""", help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""", type=lowerCamelCase, required=lowerCamelCase, help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""", type=lowerCamelCase, required=lowerCamelCase, help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""", )
parser.add_argument(
"""--student_type""", type=lowerCamelCase, choices=["""distilbert""", """roberta""", """gpt2"""], required=lowerCamelCase, help="""The student type (DistilBERT, RoBERTa).""", )
parser.add_argument("""--student_config""", type=lowerCamelCase, required=lowerCamelCase, help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""", default=lowerCamelCase, type=lowerCamelCase, help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""", choices=["""bert""", """roberta""", """gpt2"""], required=lowerCamelCase, help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""", type=lowerCamelCase, required=lowerCamelCase, help="""The teacher model.""" )
parser.add_argument("""--temperature""", default=2.0, type=lowerCamelCase, help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""", default=0.5, type=lowerCamelCase, help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""", default=0.0, type=lowerCamelCase, help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""", )
parser.add_argument("""--alpha_clm""", default=0.5, type=lowerCamelCase, help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""", default=0.0, type=lowerCamelCase, help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""", default=0.0, type=lowerCamelCase, help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""", action="""store_true""", help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""", default=0.1_5, type=lowerCamelCase, help="""Proportion of tokens for which we need to make a prediction.""", )
parser.add_argument("""--word_mask""", default=0.8, type=lowerCamelCase, help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""", default=0.1, type=lowerCamelCase, help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""", default=0.1, type=lowerCamelCase, help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""", default=0.7, type=lowerCamelCase, help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""", )
parser.add_argument("""--token_counts""", type=lowerCamelCase, help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""", action="""store_true""", help="""If true, compute the distillation loss only the [MLM] prediction distribution.""", )
parser.add_argument(
"""--freeze_pos_embs""", action="""store_true""", help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""", )
parser.add_argument(
"""--freeze_token_type_embds""", action="""store_true""", help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""", )
parser.add_argument("""--n_epoch""", type=lowerCamelCase, default=3, help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""", type=lowerCamelCase, default=5, help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""", action="""store_false""", help="""If true, group sequences that have similar length into the same batch. Default is true.""", )
parser.add_argument(
"""--gradient_accumulation_steps""", type=lowerCamelCase, default=50, help="""Gradient accumulation for larger training batches.""", )
parser.add_argument("""--warmup_prop""", default=0.0_5, type=lowerCamelCase, help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""", default=0.0, type=lowerCamelCase, help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""", default=5e-4, type=lowerCamelCase, help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""", default=1e-6, type=lowerCamelCase, help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""", default=5.0, type=lowerCamelCase, help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""", default=0.0_2, type=lowerCamelCase, help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""", )
parser.add_argument(
"""--fp16_opt_level""", type=lowerCamelCase, default="""O1""", help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
), )
parser.add_argument("""--n_gpu""", type=lowerCamelCase, default=1, help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""", type=lowerCamelCase, default=-1, help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""", type=lowerCamelCase, default=56, help="""Random seed""" )
parser.add_argument("""--log_interval""", type=lowerCamelCase, default=500, help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""", type=lowerCamelCase, default=4000, help="""Checkpoint interval.""" )
lowerCamelCase : str = parser.parse_args()
sanity_checks(lowerCamelCase )
# ARGS #
init_gpu_params(lowerCamelCase )
set_seed(lowerCamelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
""" it. Use `--force` if you want to overwrite it.""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path, """parameters.json""" ), """w""" ) as f:
json.dump(vars(lowerCamelCase ), lowerCamelCase, indent=4 )
git_log(args.dump_path )
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = MODEL_CLASSES[args.student_type]
lowerCamelCase , lowerCamelCase , lowerCamelCase : Dict = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowerCamelCase : List[str] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
lowerCamelCase : Tuple = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowerCamelCase : List[Any] = tokenizer.all_special_tokens.index(lowerCamelCase )
lowerCamelCase : str = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
lowerCamelCase : List[str] = special_tok_ids
lowerCamelCase : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file, """rb""" ) as fp:
lowerCamelCase : Union[str, Any] = pickle.load(lowerCamelCase )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts, """rb""" ) as fp:
lowerCamelCase : str = pickle.load(lowerCamelCase )
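# Turn raw token counts into masking weights: counts are clamped to >= 1 and
# raised to -mlm_smoothing, so rarer tokens receive proportionally larger weights.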
lowerCamelCase : Any = np.maximum(lowerCamelCase, 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowerCamelCase : str = 0.0 # do not predict special tokens
lowerCamelCase : int = torch.from_numpy(lowerCamelCase )
else:
lowerCamelCase : int = None
lowerCamelCase : Union[str, Any] = LmSeqsDataset(params=lowerCamelCase, data=lowerCamelCase )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
lowerCamelCase : Optional[Any] = student_config_class.from_pretrained(args.student_config )
lowerCamelCase : Optional[Any] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
lowerCamelCase : int = student_model_class.from_pretrained(args.student_pretrained_weights, config=lowerCamelCase )
else:
lowerCamelCase : List[str] = student_model_class(lowerCamelCase )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info("""Student loaded.""" )
# TEACHER #
lowerCamelCase : Optional[Any] = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=lowerCamelCase )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowerCamelCase, lowerCamelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowerCamelCase, lowerCamelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowerCamelCase : List[Any] = Distiller(
params=lowerCamelCase, dataset=lowerCamelCase, token_probs=lowerCamelCase, student=lowerCamelCase, teacher=lowerCamelCase )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
| 681 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
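# Tester that builds a small ResNetConfig plus random pixel inputs and labels
# for the TF ResNet model tests below.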
class A__ :
def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ):
lowerCamelCase : Tuple = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : List[Any] = image_size
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : Dict = embeddings_size
lowerCamelCase : Optional[int] = hidden_sizes
lowerCamelCase : Union[str, Any] = depths
lowerCamelCase : Optional[Any] = is_training
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Any = num_labels
lowerCamelCase : int = scope
lowerCamelCase : Optional[Any] = len(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ )
lowerCamelCase : Tuple = model(__magic_name__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = self.num_labels
lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ )
lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs
lowerCamelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase : List[str] = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Any = False
def UpperCamelCase__ ( self ):
lowerCamelCase : int = TFResNetModelTester(self )
lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def UpperCamelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__magic_name__ )
lowerCamelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Tuple = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = model_class(__magic_name__ )
lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Tuple = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase : Union[str, Any] = layer_type
lowerCamelCase : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : int = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ):
lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCamelCase : List[str] = self.default_image_processor
lowerCamelCase : str = prepare_img()
lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" )
# forward pass
lowerCamelCase : Tuple = model(**__magic_name__ )
# verify the logits
lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
| 681 | 1 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
_lowerCamelCase ={
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
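# Convert a single PyTorch checkpoint to TF 2.x and optionally compare the two
# models' outputs on dummy inputs before saving the .h5 weights.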
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowerCamelCase : int = cached_file(lowerCamelCase, lowerCamelCase, force_download=not use_cached_models )
lowerCamelCase : Optional[Any] = config_class.from_json_file(lowerCamelCase )
lowerCamelCase : List[Any] = True
lowerCamelCase : Tuple = True
print(F'''Building TensorFlow model from configuration: {config}''' )
lowerCamelCase : Union[str, Any] = model_class(lowerCamelCase )
# Resolve the PyTorch checkpoint path (shortcut names are downloaded via cached_file)
if pytorch_checkpoint_path in aws_config_map.keys():
lowerCamelCase : Optional[Any] = cached_file(
lowerCamelCase, lowerCamelCase, force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowerCamelCase : Dict = load_pytorch_checkpoint_in_tfa_model(lowerCamelCase, lowerCamelCase )
if compare_with_pt_model:
lowerCamelCase : List[str] = tf_model(tf_model.dummy_inputs, training=lowerCamelCase ) # build the network
lowerCamelCase : Tuple = torch.load(lowerCamelCase, map_location="""cpu""" )
lowerCamelCase : Optional[Any] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase, config=lowerCamelCase, state_dict=lowerCamelCase )
with torch.no_grad():
lowerCamelCase : Optional[Any] = pt_model(**pt_model.dummy_inputs )
lowerCamelCase : Optional[int] = pto[0].numpy()
lowerCamelCase : int = tfo[0].numpy()
lowerCamelCase : List[str] = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, F'''Error, model absolute difference is >2e-2: {diff}'''
# Save the TensorFlow model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(lowerCamelCase, save_format="""h5""" )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, ):
if args_model_type is None:
lowerCamelCase : int = list(MODEL_CLASSES.keys() )
else:
lowerCamelCase : List[str] = [args_model_type]
for j, model_type in enumerate(lowerCamelCase, start=1 ):
print("""=""" * 100 )
print(F''' Converting model type {j}/{len(lowerCamelCase )}: {model_type}''' )
print("""=""" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowerCamelCase : Optional[int] = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowerCamelCase : List[str] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(lowerCamelCase, lowerCamelCase ), start=1 ):
print("""-""" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowerCamelCase : Optional[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(lowerCamelCase )}: {model_shortcut_name} - model_type {model_type}''' )
print("""-""" * 100 )
if config_shortcut_name in aws_config_map:
lowerCamelCase : str = cached_file(lowerCamelCase, lowerCamelCase, force_download=not use_cached_models )
else:
lowerCamelCase : Dict = config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowerCamelCase : str = cached_file(lowerCamelCase, lowerCamelCase, force_download=not use_cached_models )
else:
lowerCamelCase : int = model_shortcut_name
if os.path.isfile(lowerCamelCase ):
lowerCamelCase : Optional[Any] = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=lowerCamelCase, pytorch_checkpoint_path=lowerCamelCase, config_file=lowerCamelCase, tf_dump_path=os.path.join(lowerCamelCase, model_shortcut_name + """-tf_model.h5""" ), compare_with_pt_model=lowerCamelCase, )
if remove_cached_files:
os.remove(lowerCamelCase )
os.remove(lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
_lowerCamelCase =parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 681 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
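# Build a MobileBertForPreTraining model from its JSON config, load the TF
# checkpoint weights into it, and save the resulting PyTorch state dict.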
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# Initialise PyTorch model
lowerCamelCase : str = MobileBertConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase : Tuple = MobileBertForPreTraining(lowerCamelCase )
# Load weights from tf checkpoint
lowerCamelCase : Tuple = load_tf_weights_in_mobilebert(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCamelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 681 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowerCamelCase =logging.getLogger(__name__)
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[float] = field(
default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""})
_UpperCAmelCase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to use SortishSampler or not."""})
_UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""})
_UpperCAmelCase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to use Adafactor."""})
_UpperCAmelCase : Optional[float] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""})
_UpperCAmelCase : Optional[float] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""})
_UpperCAmelCase : Optional[float] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Dropout probability. Goes into model.config."""})
_UpperCAmelCase : Optional[float] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Attention dropout probability. Goes into model.config."""})
_UpperCAmelCase : Optional[str] = field(
default="""linear""" , metadata={"""help""": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"} , )
| 681 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
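# rename_key(): translate parameter names from the original GroupViT checkpoint
# layout into the Hugging Face GroupViTModel layout, one substring at a time.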
def _a ( lowerCamelCase ):
# vision encoder
if "img_encoder.pos_embed" in name:
lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" )
if "img_encoder.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" )
if "ln_1" in name:
lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" )
if "ln_2" in name:
lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" )
if "c_fc" in name:
lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" )
if "c_proj" in name:
lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" )
if "text_encoder" in name:
lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" )
if "ln_final" in name:
lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" )
if "img_projector.linear_out." in name:
lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" )
if "text_projector.linear_out" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" )
return name
def _a ( lowerCamelCase, lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : Any = key.split(""".""" )
lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] )
lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase : int = val[:dim, :]
lowerCamelCase : List[str] = val[dim : dim * 2, :]
lowerCamelCase : Dict = val[-dim:, :]
else:
lowerCamelCase : List[Any] = val[:dim]
lowerCamelCase : List[Any] = val[dim : dim * 2]
lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : str = key.split(""".""" )
lowerCamelCase : Optional[int] = int(key_split[3] )
lowerCamelCase : List[str] = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase : Optional[int] = val[:dim, :]
lowerCamelCase : Any = val[
dim : dim * 2, :
]
lowerCamelCase : Optional[Any] = val[-dim:, :]
else:
lowerCamelCase : Union[str, Any] = val[:dim]
lowerCamelCase : Optional[int] = val[dim : dim * 2]
lowerCamelCase : Union[str, Any] = val[-dim:]
else:
lowerCamelCase : List[Any] = rename_key(lowerCamelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase : Any = val.squeeze_()
else:
lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def _a ( ):
lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ):
lowerCamelCase : int = GroupViTConfig()
lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval()
lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""]
lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase )
lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0)
# verify result
lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowerCamelCase : int = prepare_img()
lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" )
with torch.no_grad():
lowerCamelCase : int = model(**lowerCamelCase )
if model_name == "groupvit-gcc-yfcc":
lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 )
processor.save_pretrained(lowerCamelCase )
model.save_pretrained(lowerCamelCase )
print("""Successfully saved processor and model to""", lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCamelCase, organization="""nielsr""" )
model.push_to_hub(lowerCamelCase, organization="""nielsr""" )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_lowerCamelCase =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 681 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 681 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
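# Flax Karras VE ("variance expanding") scheduler; all mutable quantities
# (timesteps, sigma schedule) live in the immutable state dataclass below.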
@flax.struct.dataclass
class A__ :
# setable values
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def UpperCamelCase__ ( cls ):
return cls()
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : KarrasVeSchedulerState
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
return True
@register_to_config
def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ):
pass
def UpperCamelCase__ ( self ):
return KarrasVeSchedulerState.create()
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ):
lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy()
lowerCamelCase : int = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , )
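# Stochastic "churn" step: raise sigma to sigma_hat and add eps ~ N(0, s_noise^2 * I)
# so the sample matches the higher noise level before denoising.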
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase : Dict = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 )
lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape )
lowerCamelCase : List[Any] = sigma + gamma * sigma
lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output
lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : str = sample_prev + sigma_prev * model_output
lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
raise NotImplementedError()
| 681 | 1 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
_lowerCamelCase =(
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
_lowerCamelCase =[ord(letter) for letter in string.ascii_lowercase]
_lowerCamelCase ={ord(char) for char in VALID_CHARS}
_lowerCamelCase =["the", "be", "to", "of", "and", "in", "that", "have"]
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : str = ""
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : int
for keychar, cipherchar in zip(cycle(lowerCamelCase ), lowerCamelCase ):
lowerCamelCase : Optional[Any] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowerCamelCase )
return decoded
def _a ( lowerCamelCase ):
lowerCamelCase : list[str] = []
for key in product(lowerCamelCase, repeat=3 ):
lowerCamelCase : Optional[Any] = try_key(lowerCamelCase, lowerCamelCase )
if encoded is not None:
possibles.append(lowerCamelCase )
return possibles
def _a ( lowerCamelCase, lowerCamelCase ):
return [possible for possible in possibles if common_word in possible.lower()]
def _a ( lowerCamelCase = "p059_cipher.txt" ):
lowerCamelCase : list[int]
lowerCamelCase : list[str]
lowerCamelCase : str
lowerCamelCase : str
lowerCamelCase : str = Path(lowerCamelCase ).parent.joinpath(lowerCamelCase ).read_text(encoding="""utf-8""" )
lowerCamelCase : Union[str, Any] = [int(lowerCamelCase ) for number in data.strip().split(""",""" )]
lowerCamelCase : List[Any] = filter_valid_chars(lowerCamelCase )
for common_word in COMMON_WORDS:
lowerCamelCase : Any = filter_common_word(lowerCamelCase, lowerCamelCase )
if len(lowerCamelCase ) == 1:
break
lowerCamelCase : List[Any] = possibles[0]
return sum(ord(lowerCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 681 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
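# gen_gaussian_kernel(): build a k_size x k_size Gaussian centred on the middle
# pixel. The 1 / (2 * pi * sigma) factor only scales the kernel; it is not
# renormalised to sum to 1.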
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[str] = k_size // 2
lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCamelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) )
return g
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1]
# dst image height and width
lowerCamelCase : Dict = height - k_size + 1
lowerCamelCase : str = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
lowerCamelCase : List[Any] = 0
for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ):
lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] )
lowerCamelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase )
lowerCamelCase : str = ravel(lowerCamelCase )
# reshape and get the dst image
lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(lowerCamelCase )
return dst
if __name__ == "__main__":
# read original image
_lowerCamelCase =imread(R"""../image_data/lena.jpg""")
# turn image in gray scale value
_lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_lowerCamelCase =gaussian_filter(gray, 3, sigma=1)
_lowerCamelCase =gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("""gaussian filter with 3x3 mask""", gaussianaxa)
imshow("""gaussian filter with 5x5 mask""", gaussianaxa)
waitKey()
| 681 | 1 |
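Editor's note: the Gaussian-filter row above implements convolution via im2col: every k x k window becomes one row of a matrix, so the whole filter reduces to a single matrix-vector product. A self-contained sketch follows; identifiers are reconstructions, and the 1/(2*pi*sigma) prefactor mirrors the row above rather than a sum-to-one normalization.
import numpy as np
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = np.mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    return 1 / (2 * np.pi * sigma) * np.exp(-(x**2 + y**2) / (2 * sigma**2))
def gaussian_filter(image, k_size, sigma):
    h, w = image.shape
    dst_h, dst_w = h - k_size + 1, w - k_size + 1
    # im2col: one flattened k_size*k_size window per row
    windows = np.zeros((dst_h * dst_w, k_size * k_size))
    for row, (i, j) in enumerate(np.ndindex(dst_h, dst_w)):
        windows[row] = image[i : i + k_size, j : j + k_size].ravel()
    kernel = gen_gaussian_kernel(k_size, sigma).ravel()
    # one matrix-vector product convolves every window at once
    return (windows @ kernel).reshape(dst_h, dst_w).astype(np.uint8)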
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
lowerCamelCase : Optional[Any] = nn.Parameter(lowerCamelCase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
lowerCamelCase : Any = nn.Parameter(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# set torch weights for 1-to-1 comparison
lowerCamelCase : Any = np.asarray(weights[0] )
lowerCamelCase : List[str] = np.asarray(weights[1] )
lowerCamelCase : Optional[int] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key, torch.tensor(lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, lowerCamelCase ), )
set_param(
torch_layer.self_attention.value, torch.tensor(lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, lowerCamelCase ), )
set_param(
torch_layer.output.dense, torch.tensor(lowerCamelCase ).view(-1, lowerCamelCase ).contiguous().transpose(0, 1 ), )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# set torch weights for 1-to-1 comparison
lowerCamelCase : Any = np.asarray(weights[0] )
lowerCamelCase : List[Any] = np.asarray(weights[1] )
lowerCamelCase : List[str] = np.asarray(weights[2] )
lowerCamelCase : Union[str, Any] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query, torch.tensor(lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, lowerCamelCase ), )
set_param(
torch_layer.self_attention.key, torch.tensor(lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, lowerCamelCase ), )
set_param(
torch_layer.self_attention.value, torch.tensor(lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, lowerCamelCase ), )
set_param(
torch_layer.output.dense, torch.tensor(lowerCamelCase ).view(-1, lowerCamelCase ).contiguous().transpose(0, 1 ), )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# layernorm 1
lowerCamelCase : int = weights[0][0][0]
lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] )
lowerCamelCase : Any = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm, torch.tensor(lowerCamelCase ), torch.tensor(lowerCamelCase ), )
# lsh weights + output
lowerCamelCase : Tuple = weights[0][1]
if len(lowerCamelCase ) < 4:
set_layer_weights_in_torch_lsh(lowerCamelCase, torch_block.attention, lowerCamelCase )
else:
set_layer_weights_in_torch_local(lowerCamelCase, torch_block.attention, lowerCamelCase )
# intermediate weights
lowerCamelCase : Union[str, Any] = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase ) == 4:
lowerCamelCase : Optional[int] = intermediate_weights[2]
# layernorm 2
lowerCamelCase : int = np.asarray(intermediate_weights[0][0] )
lowerCamelCase : Dict = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm, torch.tensor(lowerCamelCase ), torch.tensor(lowerCamelCase ), )
# intermediate dense
lowerCamelCase : int = np.asarray(intermediate_weights[1][0] )
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense, torch.tensor(lowerCamelCase ).transpose(0, 1 ).contiguous(), torch.tensor(lowerCamelCase ), )
# intermediate out
lowerCamelCase : str = np.asarray(intermediate_weights[4][0] )
lowerCamelCase : Tuple = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense, torch.tensor(lowerCamelCase ).transpose(0, 1 ).contiguous(), torch.tensor(lowerCamelCase ), )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# reformer model
lowerCamelCase : List[str] = torch_model.reformer
# word embeds
lowerCamelCase : Optional[Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings, torch.tensor(lowerCamelCase ), )
if isinstance(weights[3], lowerCamelCase ):
lowerCamelCase : str = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCamelCase : Union[str, Any] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'''{position_embeddings[emb_idx]} emb does not match'''
lowerCamelCase : Dict = nn.Parameter(torch.tensor(lowerCamelCase ) )
lowerCamelCase : int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCamelCase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCamelCase : List[str] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# output layer norm
lowerCamelCase : List[Any] = np.asarray(weights[7][0] )
lowerCamelCase : Any = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm, torch.tensor(lowerCamelCase ), torch.tensor(lowerCamelCase ), )
# output embeddings
lowerCamelCase : Optional[int] = np.asarray(weights[9][0] )
lowerCamelCase : List[str] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder, torch.tensor(lowerCamelCase ).transpose(0, 1 ).contiguous(), torch.tensor(lowerCamelCase ), )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# Initialise PyTorch model
lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase : Dict = ReformerModelWithLMHead(lowerCamelCase )
with open(lowerCamelCase, """rb""" ) as f:
lowerCamelCase : Tuple = pickle.load(lowerCamelCase )["""weights"""]
set_model_weights_in_torch(lowerCamelCase, lowerCamelCase, config.hidden_size )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCamelCase =parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 681 |
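Editor's note: the Reformer conversion row above hinges on one shape-checked copy helper, applied layer by layer to attention, feed-forward, embedding, and output weights. A de-obfuscated sketch of that pattern, assuming the weights are already wrapped in torch.Tensor:
import torch
from torch import nn
def set_param(torch_layer, weight, bias=None):
    # fail loudly if the trax and torch shapes have drifted apart
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} weight shape mismatch"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} bias shape mismatch"
        torch_layer.bias = nn.Parameter(bias)
# typical call site: transpose trax's (in, out) layout into torch's (out, in)
# set_param(model.lm_head.decoder, torch.tensor(w).transpose(0, 1).contiguous(), torch.tensor(b))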
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = dataset_loading_script_name
lowerCamelCase : Dict = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase )
lowerCamelCase : str = script_dir / F'''{script_name}.py'''
with open(lowerCamelCase, """w""" ) as f:
f.write(lowerCamelCase )
return str(lowerCamelCase )
| 681 | 1 |
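Editor's note: the pytest row above defines fixtures that write a dummy loading script under tmp_path and return its directory. A hypothetical consumer (the test name and assertion are illustrative, not from the source) could resolve the builder from that path; depending on the datasets version, trust_remote_code=True may also be required.
import datasets
def test_fixture_script_is_loadable(dataset_loading_script_dir):
    # load_dataset_builder resolves __dummy_dataset1__.py without downloading data
    builder = datasets.load_dataset_builder(dataset_loading_script_dir)
    assert builder.info.features is not None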