"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
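# Worked example (hypothetical checkpoint key): with a patch-embedding offset of 1,
# replace_key_with_offset("poolformer.encoder.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
# returns "poolformer.encoder.block.1.0.output.conv1.weight" -- the block number is
# shifted down by the offset and the layer name is mapped to its HuggingFace equivalent.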
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCamelCase :Dict = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__lowerCamelCase :Optional[int] = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
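# A usage sketch (hypothetical local paths; the flags match the argparse definitions
# above), followed by reloading the converted checkpoint like any HuggingFace model:
#
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path /path/to/dump
#
# from transformers import PoolFormerForImageClassification
# model = PoolFormerForImageClassification.from_pretrained("/path/to/dump")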
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
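# A minimal sketch (using the names above) of the hook mechanics exercised by the
# tests below: attaching a hook wraps the module's forward; a second
# add_hook_to_module call replaces the first unless append=True; SequentialHook
# chains several hooks explicitly.
#
# model = ModelForTest()
# add_hook_to_module(model, PreForwardHook())   # inputs are shifted by +1
# add_hook_to_module(model, SequentialHook(PreForwardHook(), PostForwardHook()))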
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True, weights_map=model.state_dict())

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """
    A Spanish national ID is a string of 8 digits plus one letter.
    The letter is a checksum: LOOKUP_LETTERS[number % 23].
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]
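# Worked example: for the ID "12345678Z", 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so is_spain_national_id("12345678Z") returns True.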
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
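# A short usage sketch of the lazy-module pattern above: the package import stays
# cheap, and a submodule is only materialized when one of its attributes is first
# accessed.
#
# from transformers.models.encodec import EncodecConfig  # configuration module only
# from transformers.models.encodec import EncodecModel   # triggers the modeling_encodec import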
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
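# Usage sketch: the (deprecated) command is run from the root of a transformers
# clone and walks through the cookiecutter prompts interactively; in testing mode
# the prompts are answered from a JSON file instead (hypothetical path):
#
#   transformers-cli add-new-model
#   transformers-cli add-new-model --testing --testing_file /path/to/config.json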
""" Testing suite for the PyTorch ConvNext model. """
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
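        # Worked example with the tester defaults above: batch_size=13,
        # hidden_sizes[-1]=40 and image_size=32, so the expected shape is (13, 40, 1, 1).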
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
"""
Project Euler problem 97: https://projecteuler.net/problem=97

Find the last ten digits of the non-Mersenne prime 28433 * 2**7830457 + 1.
"""
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
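# The three-argument pow performs modular exponentiation, so only the last n digits
# are ever materialized: solution(10) yields the last ten digits of
# 28433 * 2**7830457 + 1 without constructing the roughly 2.36-million-digit integer.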
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
""" REALM model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
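# A minimal usage sketch (standard PretrainedConfig workflow): build a default
# configuration and read back one of the retrieval settings defined above.
#
# from transformers import RealmConfig
# configuration = RealmConfig()
# assert configuration.num_block_records == 13353718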
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
""" GLPN model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
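# A minimal usage sketch (standard config-to-model workflow; GLPNForDepthEstimation
# is the depth-estimation head this config is typically paired with): instantiate a
# randomly initialized model from the default configuration.
#
# from transformers import GLPNConfig, GLPNForDepthEstimation
# configuration = GLPNConfig()
# model = GLPNForDepthEstimation(configuration)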
| 42
| 1
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A__ ( nn.Module):
"""simple docstring"""
def __init__( self: Dict )-> Dict:
super().__init__()
lowerCamelCase : Tuple = nn.Linear(3 , 4 )
lowerCamelCase : Optional[Any] = nn.BatchNormad(4 )
lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 )
def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple:
return (args[0] + 1,) + args[1:], kwargs
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]:
return output + 1
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Dict = ModelHook()
add_hook_to_module(__a , __a )
self.assertEqual(test_model._hf_hook , __a )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Union[str, Any] = ModelHook()
add_hook_to_module(__a , __a )
add_hook_to_module(__a , __a , append=__a )
self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: List[Any] )-> List[str]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Union[str, Any] = test_model(x + 1 )
lowerCamelCase : Optional[int] = test_model(x + 2 )
lowerCamelCase : List[Any] = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[int] = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : Dict = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
assert torch.allclose(__a , __a , atol=1e-5 )
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : List[str] = torch.randn(2 , 3 )
lowerCamelCase : int = test_model(__a )
lowerCamelCase : Dict = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : str = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
assert torch.allclose(__a , output + 2 , atol=1e-5 )
def a__ ( self: int )-> Dict:
lowerCamelCase : List[Any] = ModelForTest()
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : List[str] = test_model(__a )
lowerCamelCase : Any = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = test_model(__a )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCamelCase : str = torch.randn(2 , 3 )
lowerCamelCase : Dict = model(__a )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
lowerCamelCase : str = model(__a )
self.assertEqual(output.device , torch.device(0 ) )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
lowerCamelCase : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : int = torch.randn(2 , 3 )
lowerCamelCase : Optional[int] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule to its own execution device
lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__a , execution_device=__a , offload=__a )
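        # Unlike add_hook_to_module, attach_align_device_hook recurses through the submodules,
        # so a single call covers the whole model.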
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule to its own execution device
lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
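        # The model's own state_dict serves here as the weights_map that backs the offloaded parameters.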
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Tuple = torch.randn(2 , 3 )
lowerCamelCase : Any = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 42
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float , p: int ) -> float:
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
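# Example: ucal(1.5, 3) = 1.5 * (1.5 - 1) * (1.5 - 2) = -0.375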
def main() -> None:
    n = int(input("""enter the numbers of values: """ ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(0 )
    print("""enter the values of parameters in a list: """ )
    x = list(map(int , input().split() ) )
    print("""enter the values of corresponding parameters: """ )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("""enter the value to interpolate: """ ) )
    # u = (value - x[0]) / h, assuming equally spaced x values
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
| 42
| 1
|
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
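# Gabor kernel: g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi),
# where (x', y') are the pixel coordinates rotated by theta.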
def gabor_filter_kernel(
    ksize: int , sigma: int , theta: int , lambd: int , gamma: int , psi: int ) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # fill in each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
img = imread('../image_data/lena.jpg')
# turn the image into gray scale
gray = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple kernels to detect edges
out = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
    kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
    out += filter2D(gray, CV_8UC3, kernel_aa)
out = out / out.max() * 255
out = out.astype(np.uint8)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
import argparse
import datetime
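# Zeller's congruence, as implemented below: f = (floor(2.6 * m - 5.39) + floor(c / 4) + floor(k / 4) + d + k - 2 * c) mod 7,
# where m is the month (Jan/Feb count as months 13/14 of the previous year), d the day,
# c the century, and k the year within the century.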
def zeller(date_input: str ) -> str:
    days = {
        """0""": """Sunday""",
        """1""": """Monday""",
        """2""": """Tuesday""",
        """3""": """Wednesday""",
        """4""": """Thursday""",
        """5""": """Friday""",
        """6""": """Saturday""",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError("""Must be 10 characters long""" )
    # Get month
    m: int = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("""Month must be between 1 - 12""" )
    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get day
    d: int = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("""Date must be between 1 - 31""" )
    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            """Year out of range. There has to be some sort of limit...right?""" )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y )[:2] )
    k: int = int(str(y )[2:] )
    t: int = int(2.6 * m - 5.39 )
    u: int = int(c / 4 )
    v: int = int(k / 4 )
    x: int = int(d + k )
    z: int = int(t + u + v + x )
    w: int = int(z - (2 * c) )
    f: int = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
    # Response
    response: str = F'Your date {date_input}, is a {days[str(f )]}!'
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase :List[str] = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
__lowerCamelCase :Dict = parser.parse_args()
zeller(args.date_input)
| 42
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict:
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ )
else:
lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 192:
lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 256:
lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 384:
lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ )
from_model.eval()
lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
lowerCamelCase : Tuple = OrderedDict()
lowerCamelCase : Optional[Any] = from_model.state_dict()
lowerCamelCase : str = list(from_model.state_dict().keys() )
lowerCamelCase : List[Any] = list(our_model.state_dict().keys() )
print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : str = weights[og_keys[i]]
our_model.load_state_dict(UpperCamelCase__ )
lowerCamelCase : int = torch.randn((2, 3, 224, 224) )
lowerCamelCase : Any = from_model(UpperCamelCase__ )
lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one."
lowerCamelCase : Dict = name
print(UpperCamelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowerCamelCase : Optional[int] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]:
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
    names_to_config = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
__lowerCamelCase :List[Any] = parser.parse_args()
__lowerCamelCase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 42
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase :List[str] = logging.get_logger(__name__)
__lowerCamelCase :str = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class A__ ( __lowercase , __lowercase):
"""simple docstring"""
snake_case__ : Any ='''nat'''
snake_case__ : List[str] ={
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self: Optional[Any] , __a: int=4 , __a: Union[str, Any]=3 , __a: Optional[int]=64 , __a: Optional[int]=[3, 4, 6, 5] , __a: Optional[int]=[2, 4, 8, 16] , __a: Union[str, Any]=7 , __a: Tuple=3.0 , __a: str=True , __a: Tuple=0.0 , __a: Any=0.0 , __a: Dict=0.1 , __a: List[Any]="gelu" , __a: List[Any]=0.02 , __a: Dict=1e-5 , __a: Union[str, Any]=0.0 , __a: Optional[int]=None , __a: List[str]=None , **__a: Tuple , )-> List[str]:
super().__init__(**__a )
lowerCamelCase : Any = patch_size
lowerCamelCase : Any = num_channels
lowerCamelCase : int = embed_dim
lowerCamelCase : Tuple = depths
lowerCamelCase : Optional[int] = len(__a )
lowerCamelCase : Union[str, Any] = num_heads
lowerCamelCase : Any = kernel_size
lowerCamelCase : Dict = mlp_ratio
lowerCamelCase : Tuple = qkv_bias
lowerCamelCase : Optional[int] = hidden_dropout_prob
lowerCamelCase : Tuple = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = drop_path_rate
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : List[Any] = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase : int = int(embed_dim * 2 ** (len(__a ) - 1) )
lowerCamelCase : Dict = layer_scale_init_value
lowerCamelCase : int = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(__a ) + 1 )]
lowerCamelCase , lowerCamelCase : Optional[int] = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
| 42
|
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowercase):
"""simple docstring"""
    snake_case__ : Tuple =(KDPM2DiscreteScheduler,)
snake_case__ : Tuple =10
def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]:
lowerCamelCase : int = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**__a )
return config
def a__ ( self: Union[str, Any] )-> Any:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__a )
def a__ ( self: str )-> int:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def a__ ( self: int )-> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def a__ ( self: List[Any] )-> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase : List[str] = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase : List[str] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : Dict = self.dummy_model()
lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : List[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
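            # Denoising loop: scale the model input, predict the noise residual, then step the scheduler.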
lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : Optional[Any] = output.prev_sample
lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def a__ ( self: Any )-> Any:
if torch_device == "mps":
return
lowerCamelCase : Dict = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config()
lowerCamelCase : int = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : List[Any] = self.dummy_model()
lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : Optional[int] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[Any] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : str = output.prev_sample
lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def a__ ( self: Optional[Any] )-> List[Any]:
if torch_device == "mps":
return
lowerCamelCase : Any = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase : Optional[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
lowerCamelCase : Union[str, Any] = self.dummy_model()
lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : int = scheduler.step(__a , __a , __a )
lowerCamelCase : int = output.prev_sample
lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
lowerCamelCase : int = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
| 42
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :str = {}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''llama'''
snake_case__ : Dict =['''past_key_values''']
def __init__( self: Optional[Any] , __a: Union[str, Any]=32_000 , __a: Optional[Any]=4_096 , __a: List[Any]=11_008 , __a: List[str]=32 , __a: List[Any]=32 , __a: List[Any]=None , __a: int="silu" , __a: List[Any]=2_048 , __a: List[str]=0.02 , __a: Optional[int]=1e-6 , __a: Any=True , __a: Optional[Any]=0 , __a: List[str]=1 , __a: int=2 , __a: List[str]=1 , __a: List[Any]=False , __a: str=None , **__a: List[Any] , )-> List[str]:
lowerCamelCase : Dict = vocab_size
lowerCamelCase : Union[str, Any] = max_position_embeddings
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : Any = intermediate_size
lowerCamelCase : Union[str, Any] = num_hidden_layers
lowerCamelCase : List[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : int = num_key_value_heads
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : List[Any] = rms_norm_eps
lowerCamelCase : List[Any] = pretraining_tp
lowerCamelCase : str = use_cache
lowerCamelCase : List[str] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , )
def a__ ( self: str )-> List[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f'got {self.rope_scaling}' )
lowerCamelCase : Tuple = self.rope_scaling.get("""type""" , __a )
lowerCamelCase : str = self.rope_scaling.get("""factor""" , __a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 42
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
    snake_case__ : str =StableDiffusionXLImg2ImgPipeline
snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ ( self: List[str] )-> int:
torch.manual_seed(0 )
        lowerCamelCase : Any = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase : Any = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
lowerCamelCase : Dict = CLIPTextModel(__a )
lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase : Any = image / 2 + 0.5
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Dict = torch.manual_seed(__a )
else:
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
        lowerCamelCase : Optional[int] = StableDiffusionXLImg2ImgPipeline(**__a )
lowerCamelCase : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ ( self: Optional[Any] )-> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self: List[str] )-> Optional[Any]:
pass
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = self.get_dummy_components()
        lowerCamelCase : Union[str, Any] = StableDiffusionXLImg2ImgPipeline(**__a )
lowerCamelCase : str = sd_pipe.to(__a )
lowerCamelCase : Any = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
# forward without prompt embeds
lowerCamelCase : Dict = self.get_dummy_inputs(__a )
lowerCamelCase : Any = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Optional[int] = negative_prompt
lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
lowerCamelCase : List[Any] = sd_pipe(**__a )
lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a )
lowerCamelCase : int = sd_pipe(
**__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]:
lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_inputs(__a )
lowerCamelCase : Optional[Any] = pipe(**__a ).images
lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 42
| 1
|
"""simple docstring"""
def excel_title_to_column(column_title: str ) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
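# Example: "AB" -> 1 * 26 + 2 = 28; each letter acts as a base-26 digit with A = 1 ... Z = 26.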
if __name__ == "__main__":
from doctest import testmod
testmod()
| 42
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """simple docstring"""
    def forward(self , input_ids , token_type_ids , attention_mask ):
        return None
class FuncNonContiguousArgs:
    """simple docstring"""
    def forward(self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class OnnxExportTestCase(unittest.TestCase):
    """simple docstring"""
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self: Optional[Any] )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """tf""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: str )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """pt""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: Union[str, Any] )-> Dict:
from transformers import BertModel
lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__a ) )
vocab_file.flush()
lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
model.save_pretrained(__a )
self._test_export(__a , """pt""" , 12 , __a )
@require_tf
@slow
def a__ ( self: Optional[Any] )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
lowerCamelCase : Tuple = quantize(Path(__a ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def a__ ( self: Any )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
lowerCamelCase : Dict = quantize(__a )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any:
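        # Exports the model to ONNX inside a temporary directory and returns the resulting path,
        # failing the test on any exception.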
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__a , __a , __a , __a , __a , **__a )
return path
except Exception as e:
self.fail(__a )
@require_torch
@require_tokenizers
@slow
def a__ ( self: Tuple )-> Dict:
from transformers import BertModel
lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """pt""" )
@require_tf
@require_tokenizers
@slow
def a__ ( self: Optional[Any] )-> List[Any]:
from transformers import TFBertModel
lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """tf""" )
def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]:
lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a )
lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a )
# Assert all variables are present
self.assertEqual(len(__a ) , len(__a ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __a )
self.assertSequenceEqual(variable_names[3:] , __a )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def a__ ( self: List[Any] )-> int:
lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__a ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__a ) , set(__a ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )
        # Should have exactly one arg (everything before the unprovided "some_other_args")
self.assertEqual(len(__a ) , 1 )
self.assertEqual(len(__a ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 42
| 1
|
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000 , 1000 ) for _ in range(10 )]
    r = randint(-5000 , 5000 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int] , target: int ) -> tuple[int, ...]:
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2(arr: list[int] , target: int ) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
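# triplet_sum1 tries every 3-permutation (O(n^3)); triplet_sum2 sorts once and uses a two-pointer scan (O(n^2)).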
def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code_1 = """
triplet_sum1(*dataset)
"""
    test_code_2 = """
triplet_sum2(*dataset)
"""
    times_1 = repeat(setup=setup_code , stmt=test_code_1 , repeat=5 , number=10000 )
    times_2 = repeat(setup=setup_code , stmt=test_code_2 , repeat=5 , number=10000 )
    return (min(times_1 ), min(times_2 ))
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowerCamelCase = times = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 42
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
    def test_sorted(self ):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def test_negative_max_weight(self ):
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )
    def test_negative_weight_value(self ):
        self.assertRaisesRegex(ValueError , """Weight can not be negative.""" )
    def test_negative_profit_value(self ):
        self.assertRaisesRegex(ValueError , """Profit can not be negative.""" )
    def test_null_max_weight(self ):
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )
    def test_unequal_list_length(self ):
        self.assertRaisesRegex(
            IndexError , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 42
| 1
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body(*x: List[Any] , **y: Any ) -> Any:
        pass
    _serve_dependencies_installed = False
logger = logging.get_logger('transformers-cli/serving')
def serve_command_factory(args: Namespace ) -> ServeCommand:
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers )
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : dict
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[str]
snake_case__ : Optional[List[int]]
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : str
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Any
class A__ ( __lowercase):
"""simple docstring"""
@staticmethod
def a__ ( __a: ArgumentParser )-> Optional[Any]:
lowerCamelCase : str = parser.add_parser(
"""serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" , type=__a , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
serve_parser.add_argument("""--host""" , type=__a , default="""localhost""" , help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" , type=__a , default=8_888 , help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" , type=__a , default=1 , help="""Number of http workers""" )
serve_parser.add_argument("""--model""" , type=__a , help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" , type=__a , help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" , type=__a , help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" , type=__a , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
        serve_parser.set_defaults(func=serve_command_factory )
def __init__( self: Union[str, Any] , __a: Pipeline , __a: str , __a: int , __a: int )-> Optional[Any]:
lowerCamelCase : List[str] = pipeline
lowerCamelCase : int = host
lowerCamelCase : Tuple = port
lowerCamelCase : Dict = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(f'Serving model over {host}:{port}' )
lowerCamelCase : List[Any] = FastAPI(
routes=[
APIRoute(
"""/""" , self.model_info , response_model=__a , response_class=__a , methods=["""GET"""] , ),
APIRoute(
"""/tokenize""" , self.tokenize , response_model=__a , response_class=__a , methods=["""POST"""] , ),
APIRoute(
"""/detokenize""" , self.detokenize , response_model=__a , response_class=__a , methods=["""POST"""] , ),
APIRoute(
"""/forward""" , self.forward , response_model=__a , response_class=__a , methods=["""POST"""] , ),
] , timeout=600 , )
def a__ ( self: Dict )-> List[str]:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def a__ ( self: Any )-> List[Any]:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def a__ ( self: Tuple , __a: str = Body(__a , embed=__a ) , __a: bool = Body(__a , embed=__a ) )-> Optional[int]:
try:
lowerCamelCase : int = self._pipeline.tokenizer.tokenize(__a )
if return_ids:
lowerCamelCase : Dict = self._pipeline.tokenizer.convert_tokens_to_ids(__a )
return ServeTokenizeResult(tokens=__a , tokens_ids=__a )
else:
return ServeTokenizeResult(tokens=__a )
except Exception as e:
raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(__a )} )
def a__ ( self: str , __a: List[int] = Body(__a , embed=__a ) , __a: bool = Body(__a , embed=__a ) , __a: bool = Body(__a , embed=__a ) , )-> Optional[Any]:
try:
lowerCamelCase : List[Any] = self._pipeline.tokenizer.decode(__a , __a , __a )
return ServeDeTokenizeResult(model="""""" , text=__a )
except Exception as e:
raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(__a )} )
async def a__ ( self: Any , __a: Union[str, Any]=Body(__a , embed=__a ) )-> Any:
# Check we don't have empty string
if len(__a ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
lowerCamelCase : str = self._pipeline(__a )
return ServeForwardResult(output=__a )
except Exception as e:
raise HTTPException(500 , {"""error""": str(__a )} )
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] =1
snake_case__ : Tuple =2
snake_case__ : Dict =3
snake_case__ : Tuple =4
snake_case__ : Tuple =5
@dataclass
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : jnp.ndarray
class A__ :
"""simple docstring"""
snake_case__ : Tuple =SCHEDULER_CONFIG_NAME
snake_case__ : Union[str, Any] =['''dtype''']
snake_case__ : Union[str, Any] =[]
snake_case__ : Any =True
@classmethod
def a__ ( cls: Union[str, Any] , __a: Dict[str, Any] = None , __a: Optional[str] = None , __a: Dict=False , **__a: Any , )-> Optional[int]:
lowerCamelCase , lowerCamelCase : Optional[Any] = cls.load_config(
pretrained_model_name_or_path=__a , subfolder=__a , return_unused_kwargs=__a , **__a , )
lowerCamelCase , lowerCamelCase : List[Any] = cls.from_config(__a , return_unused_kwargs=__a , **__a )
if hasattr(__a , """create_state""" ) and getattr(__a , """has_state""" , __a ):
lowerCamelCase : int = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def a__ ( self: List[Any] , __a: Union[str, os.PathLike] , __a: bool = False , **__a: int )-> Optional[Any]:
self.save_config(save_directory=__a , push_to_hub=__a , **__a )
@property
def a__ ( self: str )-> int:
return self._get_compatibles()
@classmethod
def a__ ( cls: int )-> Union[str, Any]:
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split(""".""" )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray , shape: Tuple[int] ) -> jnp.ndarray:
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar(num_diffusion_timesteps: int , max_beta: float = 0.999 , dtype=jnp.float32 ) -> jnp.ndarray:
    def alpha_bar(time_step: float ):
        return math.cos((time_step + 0.008 ) / 1.008 * math.pi / 2 ) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
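# The cosine ("squaredcos_cap_v2" / Glide) schedule above sets beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta).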
@flax.struct.dataclass
class A__ :
"""simple docstring"""
snake_case__ : jnp.ndarray
snake_case__ : jnp.ndarray
snake_case__ : jnp.ndarray
@classmethod
def a__ ( cls: Optional[Any] , __a: List[str] )-> Optional[Any]:
lowerCamelCase : Optional[int] = scheduler.config
if config.trained_betas is not None:
lowerCamelCase : List[str] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowerCamelCase : str = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCamelCase : Optional[Any] = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCamelCase : int = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
lowerCamelCase : Any = 1.0 - betas
lowerCamelCase : Dict = jnp.cumprod(__a , axis=0 )
return cls(
alphas=__a , betas=__a , alphas_cumprod=__a , )
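# get_sqrt_alpha_prod returns sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t), broadcast to the sample shape.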
def snake_case ( UpperCamelCase__ : CommonSchedulerState , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : jnp.ndarray ) -> List[str]:
lowerCamelCase : str = state.alphas_cumprod
lowerCamelCase : Optional[int] = alphas_cumprod[timesteps] ** 0.5
lowerCamelCase : str = sqrt_alpha_prod.flatten()
lowerCamelCase : Optional[Any] = broadcast_to_shape_from_left(UpperCamelCase__ , original_samples.shape )
lowerCamelCase : Tuple = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCamelCase : Optional[int] = sqrt_one_minus_alpha_prod.flatten()
lowerCamelCase : Any = broadcast_to_shape_from_left(UpperCamelCase__ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
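# Forward diffusion step: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.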
def snake_case ( UpperCamelCase__ : CommonSchedulerState , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : jnp.ndarray ) -> Tuple:
lowerCamelCase , lowerCamelCase : int = get_sqrt_alpha_prod(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Tuple = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
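# Velocity target for v-prediction: v_t = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * x_0.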
def snake_case ( UpperCamelCase__ : CommonSchedulerState , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : jnp.ndarray ) -> List[Any]:
lowerCamelCase , lowerCamelCase : Optional[Any] = get_sqrt_alpha_prod(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[int] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 42
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
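# Model tester: builds tiny FocalNet configs and inputs, then checks output shapes for
# the base model, backbone, masked-image-modeling and image-classification heads.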
class A__ :
"""simple docstring"""
def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
lowerCamelCase : Dict = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Union[str, Any] = image_size
lowerCamelCase : Optional[int] = patch_size
lowerCamelCase : Any = num_channels
lowerCamelCase : Any = embed_dim
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : List[Any] = depths
lowerCamelCase : Tuple = num_heads
lowerCamelCase : List[Any] = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : str = qkv_bias
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Tuple = drop_path_rate
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Tuple = use_absolute_embeddings
lowerCamelCase : List[str] = patch_norm
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : str = initializer_range
lowerCamelCase : Tuple = is_training
lowerCamelCase : int = scope
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : str = encoder_stride
lowerCamelCase : List[str] = out_features
lowerCamelCase : Optional[int] = out_indices
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def a__ ( self: List[Any] )-> Optional[int]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = model(__a )
lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase : Dict = None
lowerCamelCase : Dict = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
lowerCamelCase : Optional[Any] = self.type_sequence_label_size
lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self: int )-> Optional[int]:
lowerCamelCase : str = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
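# Shared test suite: wires the tester into the common ModelTesterMixin / PipelineTesterMixin
# checks and adds FocalNet-specific hidden-state shape and initialization tests.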
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Tuple =False
snake_case__ : Dict =False
snake_case__ : Dict =False
snake_case__ : Tuple =False
snake_case__ : Optional[int] =False
def a__ ( self: Union[str, Any] )-> Optional[int]:
lowerCamelCase : List[str] = FocalNetModelTester(self )
lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )
def a__ ( self: List[str] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: List[str] )-> Union[str, Any]:
return
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[Any] )-> Dict:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : int = model_class(__a )
lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[str] = outputs.hidden_states
lowerCamelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self: Any )-> Any:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : List[str] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : str = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def a__ ( self: Optional[int] )-> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> Any:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = _config_zero_init(__a )
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
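# Slow integration test: classify a COCO image with the pretrained microsoft/focalnet-tiny
# checkpoint and compare the logits against reference values.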
@require_vision
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Optional[int] )-> Optional[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a )
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
snake_case__ : Optional[int] =FocalNetConfig
snake_case__ : str =False
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : str = FocalNetModelTester(self )
| 42
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase :int = logging.get_logger(__name__)
__lowerCamelCase :str = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Any ='''deit'''
def __init__( self: Dict , __a: Tuple=768 , __a: str=12 , __a: Union[str, Any]=12 , __a: Optional[int]=3_072 , __a: Dict="gelu" , __a: List[Any]=0.0 , __a: Dict=0.0 , __a: List[Any]=0.02 , __a: Optional[Any]=1e-1_2 , __a: Union[str, Any]=224 , __a: Dict=16 , __a: Optional[Any]=3 , __a: List[Any]=True , __a: Optional[Any]=16 , **__a: List[Any] , )-> Tuple:
super().__init__(**__a )
lowerCamelCase : str = hidden_size
lowerCamelCase : List[Any] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Tuple = intermediate_size
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : Dict = hidden_dropout_prob
lowerCamelCase : Tuple = attention_probs_dropout_prob
lowerCamelCase : Any = initializer_range
lowerCamelCase : List[Any] = layer_norm_eps
lowerCamelCase : Tuple = image_size
lowerCamelCase : List[Any] = patch_size
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : str = qkv_bias
lowerCamelCase : str = encoder_stride
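# ONNX export config: declares the dynamic axes of the pixel_values input and the
# absolute tolerance used when validating the exported graph.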
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =version.parse('''1.11''')
@property
def a__ ( self: Dict )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def a__ ( self: int )-> float:
return 1e-4
| 42
|
"""simple docstring"""
import os
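# Project Euler problem 11: find the greatest product of four adjacent numbers
# (right, down, or diagonal) in the 20x20 grid read from grid.txt.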
def snake_case ( ) -> Optional[Any]:
with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
lowerCamelCase : int = [] # noqa: E741
for _ in range(20 ):
l.append([int(UpperCamelCase__ ) for x in f.readline().split()] )
lowerCamelCase : Union[str, Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase : Optional[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase : List[str] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase : List[Any] = temp
return maximum
if __name__ == "__main__":
print(solution())
| 42
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
__lowerCamelCase :List[Any] = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
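# Ask which compute environment is in use and delegate to the SageMaker or cluster
# questionnaire to assemble the configuration object.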
def snake_case ( ) -> List[str]:
lowerCamelCase : Any = _ask_options(
"""In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowerCamelCase : str = get_sagemaker_input()
else:
lowerCamelCase : Dict = get_cluster_input()
return config
def snake_case ( UpperCamelCase__ : Optional[int]=None ) -> Optional[Any]:
if subparsers is not None:
lowerCamelCase : Any = subparsers.add_parser("""config""" , description=UpperCamelCase__ )
else:
lowerCamelCase : Optional[Any] = argparse.ArgumentParser("""Accelerate config command""" , description=UpperCamelCase__ )
parser.add_argument(
"""--config_file""" , default=UpperCamelCase__ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def snake_case ( UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
lowerCamelCase : Dict = get_user_input()
if args.config_file is not None:
lowerCamelCase : Tuple = args.config_file
else:
if not os.path.isdir(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
lowerCamelCase : str = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(UpperCamelCase__ )
else:
config.to_yaml_file(UpperCamelCase__ )
print(F'accelerate configuration saved at {config_file}' )
def snake_case ( ) -> Tuple:
lowerCamelCase : List[str] = config_command_parser()
lowerCamelCase : List[str] = parser.parse_args()
config_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 42
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
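# Fast pipeline tests: run Attend-and-Excite with a tiny UNet/VAE/CLIP stack on CPU,
# with deterministic algorithms enabled so output slices are reproducible.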
@skip_mps
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
snake_case__ : Any =False
snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ ( cls: Dict )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(True )
@classmethod
def a__ ( cls: Union[str, Any] )-> Any:
super().tearDownClass()
torch.use_deterministic_algorithms(False )
def a__ ( self: Tuple )-> Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCamelCase : Optional[int] = CLIPTextModel(__a )
lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Tuple = torch.manual_seed(__a )
else:
lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Dict = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def a__ ( self: Dict )-> str:
lowerCamelCase : Tuple = """cpu"""
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : List[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Any = self.get_dummy_inputs(__a )
lowerCamelCase : Union[str, Any] = pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase : Optional[Any] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def a__ ( self: int )-> Optional[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def a__ ( self: Union[str, Any] )-> Optional[int]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self: Tuple )-> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def a__ ( self: Dict )-> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def a__ ( self: Optional[int] )-> Dict:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def a__ ( self: Any )-> Tuple:
super().test_save_load_local(expected_max_difference=5e-4 )
def a__ ( self: str )-> str:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
@classmethod
def a__ ( cls: Any )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(True )
@classmethod
def a__ ( cls: Dict )-> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(False )
def a__ ( self: int )-> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = torch.manual_seed(51 )
lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowerCamelCase : Dict = """a painting of an elephant with glasses"""
lowerCamelCase : Any = [5, 7]
lowerCamelCase : Tuple = pipe(
prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowerCamelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 42
| 1
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__lowerCamelCase :Optional[Any] = random.Random()
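# Build nested lists of random floats with the given shape (optionally scaled); these
# serve as stand-in raw audio for the feature-extractor tests below.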
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any]=1.0 , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : str=None ) -> List[str]:
if rng is None:
lowerCamelCase : Union[str, Any] = global_rng
lowerCamelCase : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class A__ ( unittest.TestCase):
"""simple docstring"""
def __init__( self: int , __a: Optional[Any] , __a: List[Any]=7 , __a: Optional[int]=400 , __a: str=2_000 , __a: Dict=10 , __a: Optional[int]=160 , __a: Union[str, Any]=8 , __a: List[str]=0.0 , __a: Union[str, Any]=4_000 , __a: Optional[int]=False , __a: List[str]=True , )-> List[Any]:
lowerCamelCase : Tuple = parent
lowerCamelCase : Dict = batch_size
lowerCamelCase : Union[str, Any] = min_seq_length
lowerCamelCase : Union[str, Any] = max_seq_length
lowerCamelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase : List[str] = padding_value
lowerCamelCase : List[Any] = sampling_rate
lowerCamelCase : Tuple = return_attention_mask
lowerCamelCase : List[str] = do_normalize
lowerCamelCase : str = feature_size
lowerCamelCase : Union[str, Any] = chunk_length
lowerCamelCase : int = hop_length
def a__ ( self: List[str] )-> Tuple:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self: Dict , __a: str=False , __a: str=False )-> List[Any]:
def _flatten(__a: Optional[Any] ):
return list(itertools.chain(*__a ) )
if equal_length:
lowerCamelCase : Optional[int] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase : Dict = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase : Union[str, Any] = [np.asarray(__a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =WhisperFeatureExtractor if is_speech_available() else None
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : List[str] = WhisperFeatureExtractionTester(self )
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : int = feat_extract_first.save_pretrained(__a )[0]
check_json_file_has_correct_format(__a )
lowerCamelCase : int = self.feature_extraction_class.from_pretrained(__a )
lowerCamelCase : str = feat_extract_first.to_dict()
lowerCamelCase : List[str] = feat_extract_second.to_dict()
lowerCamelCase : Optional[Any] = feat_extract_first.mel_filters
lowerCamelCase : str = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__a , __a ) )
self.assertEqual(__a , __a )
def a__ ( self: Optional[Any] )-> List[str]:
lowerCamelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : Optional[int] = os.path.join(__a , """feat_extract.json""" )
feat_extract_first.to_json_file(__a )
lowerCamelCase : Optional[int] = self.feature_extraction_class.from_json_file(__a )
lowerCamelCase : int = feat_extract_first.to_dict()
lowerCamelCase : Union[str, Any] = feat_extract_second.to_dict()
lowerCamelCase : str = feat_extract_first.mel_filters
lowerCamelCase : Any = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__a , __a ) )
self.assertEqual(__a , __a )
def a__ ( self: List[Any] )-> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase : int = [np.asarray(__a ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase : Dict = feature_extractor(__a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowerCamelCase : Optional[int] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
lowerCamelCase : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
# Test batched
lowerCamelCase : Any = feature_extractor(__a , return_tensors="""np""" ).input_features
lowerCamelCase : List[str] = feature_extractor(__a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase : List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase : Tuple = np.asarray(__a )
lowerCamelCase : Optional[Any] = feature_extractor(__a , return_tensors="""np""" ).input_features
lowerCamelCase : Union[str, Any] = feature_extractor(__a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
# Test truncation required
lowerCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
lowerCamelCase : Union[str, Any] = [np.asarray(__a ) for speech_input in speech_inputs]
lowerCamelCase : Dict = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowerCamelCase : int = [np.asarray(__a ) for speech_input in speech_inputs_truncated]
lowerCamelCase : Optional[int] = feature_extractor(__a , return_tensors="""np""" ).input_features
lowerCamelCase : Optional[Any] = feature_extractor(__a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
def a__ ( self: Union[str, Any] )-> str:
import torch
lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase : Any = np.random.rand(100 , 32 ).astype(np.floataa )
lowerCamelCase : List[str] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase : Dict = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCamelCase : Tuple = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ ( self: List[Any] , __a: Union[str, Any] )-> Tuple:
lowerCamelCase : List[str] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
lowerCamelCase : Optional[int] = ds.sort("""id""" ).select(range(__a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self: Optional[int] )-> Tuple:
# fmt: off
lowerCamelCase : List[str] = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
lowerCamelCase : int = self._load_datasamples(1 )
lowerCamelCase : Union[str, Any] = WhisperFeatureExtractor()
lowerCamelCase : List[Any] = feature_extractor(__a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __a , atol=1e-4 ) )
def a__ ( self: Optional[int] )-> Optional[Any]:
lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase : List[str] = self._load_datasamples(1 )[0]
lowerCamelCase : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
lowerCamelCase : List[str] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__a )[0]
self.assertTrue(np.all(np.mean(__a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__a ) - 1 ) < 1e-3 ) )
| 42
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
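# Model tester: builds a small ESM config plus token inputs and validates output shapes
# for the base model and the masked-LM / token-classification heads.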
class A__ :
"""simple docstring"""
def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : Any = seq_length
lowerCamelCase : Any = is_training
lowerCamelCase : Tuple = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Union[str, Any] = hidden_dropout_prob
lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : List[Any] = type_sequence_label_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Union[str, Any] = num_labels
lowerCamelCase : Optional[Any] = num_choices
lowerCamelCase : Any = scope
def a__ ( self: Optional[int] )-> List[Any]:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Dict = None
if self.use_input_mask:
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Any = None
lowerCamelCase : int = None
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Tuple )-> Union[str, Any]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
lowerCamelCase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a )
lowerCamelCase : str = model(__a )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
lowerCamelCase : str = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Any = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =False
snake_case__ : Dict =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =()
snake_case__ : Optional[int] =(
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any =True
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Optional[Any] = EsmModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: List[Any] )-> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Any )-> List[Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> List[str]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Any = EsmEmbeddings(config=__a )
lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
lowerCamelCase : List[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Any )-> Optional[Any]:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Dict )-> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: List[str] )-> Dict:
pass
@require_torch
class A__ ( __lowercase):
"""simple docstring"""
@slow
def a__ ( self: Any )-> Union[str, Any]:
with torch.no_grad():
lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Tuple = model(__a )[0]
lowerCamelCase : Dict = 33
lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
lowerCamelCase : Tuple = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def a__ ( self: Dict )-> str:
with torch.no_grad():
lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase : Any = model(__a )[0]
# compare the actual values for a slice.
lowerCamelCase : Tuple = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
| 42
| 1
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
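# DDPM scheduler tests: sweep schedule/variance/prediction-type configs, run the full
# denoising loop against reference sums and means, and validate custom timestep lists.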
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple =(DDPMScheduler,)
def a__ ( self: Any , **__a: int )-> int:
lowerCamelCase : str = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**__a )
return config
def a__ ( self: Union[str, Any] )-> Any:
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__a )
def a__ ( self: str )-> Tuple:
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def a__ ( self: Any )-> Optional[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__a )
def a__ ( self: str )-> str:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__a )
def a__ ( self: Union[str, Any] )-> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__a )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
self.check_over_configs(thresholding=__a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__a , prediction_type=__a , sample_max_value=__a , )
def a__ ( self: Tuple )-> List[Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def a__ ( self: Union[str, Any] )-> Optional[int]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=__a )
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase : Optional[int] = self.get_scheduler_config()
lowerCamelCase : Tuple = scheduler_class(**__a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def a__ ( self: str )-> int:
lowerCamelCase : Any = self.scheduler_classes[0]
lowerCamelCase : Tuple = self.get_scheduler_config()
lowerCamelCase : List[str] = scheduler_class(**__a )
lowerCamelCase : Optional[int] = len(__a )
lowerCamelCase : Dict = self.dummy_model()
lowerCamelCase : List[str] = self.dummy_sample_deter
lowerCamelCase : Optional[int] = torch.manual_seed(0 )
for t in reversed(range(__a ) ):
# 1. predict noise residual
lowerCamelCase : Union[str, Any] = model(__a , __a )
# 2. predict previous mean of sample x_t-1
lowerCamelCase : Optional[Any] = scheduler.step(__a , __a , __a , generator=__a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase : Tuple = pred_prev_sample
lowerCamelCase : List[Any] = torch.sum(torch.abs(__a ) )
lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1e-2
assert abs(result_mean.item() - 0.33_72 ) < 1e-3
def a__ ( self: Any )-> str:
lowerCamelCase : Dict = self.scheduler_classes[0]
lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase : List[Any] = scheduler_class(**__a )
lowerCamelCase : Tuple = len(__a )
lowerCamelCase : Union[str, Any] = self.dummy_model()
lowerCamelCase : int = self.dummy_sample_deter
lowerCamelCase : Any = torch.manual_seed(0 )
for t in reversed(range(__a ) ):
# 1. predict noise residual
lowerCamelCase : List[str] = model(__a , __a )
# 2. predict previous mean of sample x_t-1
lowerCamelCase : List[Any] = scheduler.step(__a , __a , __a , generator=__a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase : List[Any] = pred_prev_sample
lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
lowerCamelCase : List[str] = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1e-2
assert abs(result_mean.item() - 0.26_31 ) < 1e-3
def a__ ( self: Any )-> Dict:
lowerCamelCase : List[Any] = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config()
lowerCamelCase : Union[str, Any] = scheduler_class(**__a )
lowerCamelCase : str = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__a )
lowerCamelCase : Dict = scheduler.timesteps
for i, timestep in enumerate(__a ):
if i == len(__a ) - 1:
lowerCamelCase : str = -1
else:
lowerCamelCase : Optional[int] = timesteps[i + 1]
lowerCamelCase : Optional[Any] = scheduler.previous_timestep(__a )
lowerCamelCase : Optional[int] = prev_t.item()
self.assertEqual(__a , __a )
def a__ ( self: Any )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase : int = self.get_scheduler_config()
lowerCamelCase : str = scheduler_class(**__a )
lowerCamelCase : str = [100, 87, 50, 51, 0]
with self.assertRaises(__a , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=__a )
def a__ ( self: Optional[Any] )-> List[str]:
lowerCamelCase : List[str] = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config()
lowerCamelCase : Optional[Any] = scheduler_class(**__a )
lowerCamelCase : Tuple = [100, 87, 50, 1, 0]
lowerCamelCase : Any = len(__a )
with self.assertRaises(__a , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=__a , timesteps=__a )
def a__ ( self: Optional[int] )-> Any:
lowerCamelCase : List[Any] = self.scheduler_classes[0]
lowerCamelCase : List[str] = self.get_scheduler_config()
lowerCamelCase : Union[str, Any] = scheduler_class(**__a )
lowerCamelCase : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__a , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=__a )
| 42
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
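# Albert tokenizer tests against the SentencePiece fixture: slow/fast parity, vocab
# contents, accent handling, and special-token assembly.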
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AlbertTokenizer
snake_case__ : Optional[Any] =AlbertTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : Any =True
snake_case__ : Optional[int] =True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Tuple )-> List[Any]:
lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
lowerCamelCase : str = AlbertTokenizer(__a )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
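# Minimal usage sketch of the tokenizer exercised above (assumption: the public
# "albert-base-v2" checkpoint; the expected pieces mirror the earlier test case).
#
# from transformers import AlbertTokenizer
#
# tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
# tokenizer.tokenize("This is a test")  # ['▁this', '▁is', '▁a', '▁test']
# tokenizer.encode("This is a test")    # token ids wrapped in [CLS] ... [SEP]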
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class A__ :
"""simple docstring"""
@staticmethod
def a__ ( *__a: Optional[int] , **__a: Optional[int] )-> Optional[Any]:
pass
def snake_case ( UpperCamelCase__ : Image ) -> str:
lowerCamelCase : List[Any] = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[int] =MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def a__ ( self: Optional[Any] , __a: Tuple , __a: Optional[int] , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[Any] = DepthEstimationPipeline(model=__a , image_processor=__a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def a__ ( self: Any , __a: List[str] , __a: List[str] )-> Optional[int]:
lowerCamelCase : Union[str, Any] = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , __a )
import datasets
lowerCamelCase : Union[str, Any] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
lowerCamelCase : Optional[int] = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , __a , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def a__ ( self: Optional[Any] )-> List[str]:
pass
@slow
@require_torch
def a__ ( self: Union[str, Any] )-> Optional[Any]:
lowerCamelCase : str = """Intel/dpt-large"""
lowerCamelCase : List[Any] = pipeline("""depth-estimation""" , model=__a )
lowerCamelCase : List[str] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
lowerCamelCase : Dict = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.3_04 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.6_62 )
@require_torch
def a__ ( self: Optional[Any] )-> List[str]:
# It is highly irregular to have no small tests.
self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
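# Hedged usage sketch for the pipeline under test (the checkpoint name and output
# keys mirror the slow test above; illustrative, not part of the suite):
#
# from transformers import pipeline
#
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# outputs["depth"]            # PIL.Image.Image holding the rendered depth map
# outputs["predicted_depth"]  # torch.Tensor of raw depth predictions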
"""simple docstring"""
__lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : Tuple = True
lowerCamelCase : Any = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
order.append(UpperCamelCase__ )
return order
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : List[Any] = True
lowerCamelCase : int = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return component
def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]:
lowerCamelCase : int = len(UpperCamelCase__ ) * [False]
lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(UpperCamelCase__ )
lowerCamelCase : int = []
for i, was_visited in enumerate(UpperCamelCase__ ):
if not was_visited:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Tuple = []
lowerCamelCase : str = len(UpperCamelCase__ ) * [False]
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1]
if not visited[vert]:
lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
components_list.append(UpperCamelCase__ )
return components_list
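# De-obfuscated, self-contained sketch of the Kosaraju routine above. The function
# and variable names here are reconstructions (assumptions), not the original
# identifiers; the logic is the same two-pass DFS over the graph and its reverse.

def _dfs_order(graph: dict[int, list[int]], vert: int, visited: list[bool], order: list[int]) -> None:
    # first pass: post-order DFS on the original graph
    visited[vert] = True
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            _dfs_order(graph, neighbour, visited, order)
    order.append(vert)

def _dfs_component(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool], component: list[int]) -> None:
    # second pass: collect one strongly connected component on the reversed graph
    visited[vert] = True
    component.append(vert)
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            _dfs_component(reversed_graph, neighbour, visited, component)

def kosaraju_sketch(graph: dict[int, list[int]]) -> list[list[int]]:
    n = len(graph)
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(n)}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    visited = [False] * n
    order: list[int] = []
    for vert in range(n):
        if not visited[vert]:
            _dfs_order(graph, vert, visited, order)
    visited = [False] * n
    components: list[list[int]] = []
    for vert in reversed(order):
        if not visited[vert]:
            component: list[int] = []
            _dfs_component(reversed_graph, vert, visited, component)
            components.append(component)
    return components

# On the two sample graphs declared at the top of this file, the components are
# {0, 1, 2}, {3}, {4} for the first and {0, 1, 2}, {3, 4, 5} for the second
# (component and vertex order may vary).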
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[Any] = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = ['BeitFeatureExtractor']
__lowerCamelCase :Tuple = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Tuple = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
__lowerCamelCase :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
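# Note: `_LazyModule` defers the heavy framework imports (torch/flax/vision) until an
# attribute is first accessed. A rough sketch of the idea (illustrative only; this is
# not transformers' actual implementation):
#
# import importlib
#
# class LazyModuleSketch:
#     def __init__(self, import_structure):
#         self._import_structure = import_structure
#     def __getattr__(self, name):
#         submodule = next(m for m, names in self._import_structure.items() if name in names)
#         module = importlib.import_module("." + submodule, __package__)
#         return getattr(module, name)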
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] ='''time_series_transformer'''
snake_case__ : List[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
# time series specific configuration
lowerCamelCase : str = prediction_length
lowerCamelCase : Optional[Any] = context_length or prediction_length
lowerCamelCase : Tuple = distribution_output
lowerCamelCase : Any = loss
lowerCamelCase : List[Any] = input_size
lowerCamelCase : int = num_time_features
lowerCamelCase : Dict = lags_sequence
lowerCamelCase : Optional[int] = scaling
lowerCamelCase : int = num_dynamic_real_features
lowerCamelCase : Tuple = num_static_real_features
lowerCamelCase : Any = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : int = cardinality
else:
lowerCamelCase : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = embedding_dimension
else:
lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features
lowerCamelCase : List[str] = d_model
lowerCamelCase : Tuple = encoder_attention_heads
lowerCamelCase : Optional[int] = decoder_attention_heads
lowerCamelCase : Union[str, Any] = encoder_ffn_dim
lowerCamelCase : str = decoder_ffn_dim
lowerCamelCase : str = encoder_layers
lowerCamelCase : Any = decoder_layers
lowerCamelCase : Optional[int] = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Tuple = activation_dropout
lowerCamelCase : Optional[int] = encoder_layerdrop
lowerCamelCase : int = decoder_layerdrop
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : Optional[Any] = init_std
lowerCamelCase : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=__a , **__a )
@property
def a__ ( self: int )-> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
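# Hedged usage sketch: `prediction_length` is the central argument when building this
# config; `context_length` falls back to `prediction_length` (see __init__ above).
#
# from transformers import TimeSeriesTransformerConfig
#
# config = TimeSeriesTransformerConfig(prediction_length=24)
# config.context_length  # 24, inherited from prediction_length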
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : str = None , UpperCamelCase__ : str = None , ) -> int:
if config_name_or_path is None:
lowerCamelCase : str = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base"""
if generator_tokenizer_name_or_path is None:
lowerCamelCase : List[str] = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCamelCase : str = question_encoder_name_or_path
lowerCamelCase : Optional[int] = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration
# Save model.
lowerCamelCase : Tuple = RagConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase : Any = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase : List[str] = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase : List[str] = gen_config
lowerCamelCase : Optional[Any] = question_encoder_config
lowerCamelCase : List[Any] = model_class.from_pretrained_question_encoder_generator(
UpperCamelCase__ , UpperCamelCase__ , config=UpperCamelCase__ )
rag_model.save_pretrained(UpperCamelCase__ )
# Sanity check.
model_class.from_pretrained(UpperCamelCase__ )
# Save tokenizers.
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" )
lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase__ )
question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" )
if __name__ == "__main__":
__lowerCamelCase :str = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
__lowerCamelCase :Dict = parser.parse_args()
__lowerCamelCase :int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
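# Example invocation (illustrative; the script filename and model identifiers are
# placeholders chosen for this sketch, matching the argparse options defined above):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint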
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase :int = 10
def snake_case ( UpperCamelCase__ : list[int] ) -> list[int]:
lowerCamelCase : int = 1
lowerCamelCase : Union[str, Any] = max(UpperCamelCase__ )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase : list[list] = [[] for _ in range(UpperCamelCase__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase : Any = int((i / placement) % RADIX )
buckets[tmp].append(UpperCamelCase__ )
# put each bucket's contents back into list_of_ints
lowerCamelCase : Dict = 0
for b in range(UpperCamelCase__ ):
for i in buckets[b]:
lowerCamelCase : List[str] = i
a += 1
# move to the next digit place
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
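# Expected behaviour of the radix sort above (illustrative; the obfuscated name
# `snake_case` stands in for something like `radix_sort`, which assumes a list of
# non-negative integers):
#
#   radix_sort([0, 5, 3, 2, 2])      -> [0, 2, 2, 3, 5]
#   radix_sort([1, 100, 10, 1000])   -> [1, 10, 100, 1000]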
"""simple docstring"""
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def snake_case ( UpperCamelCase__ : List[Any] ) -> Optional[int]:
if hor == 128:
lowerCamelCase : int = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase : Any = (32, 128, 256)
lowerCamelCase : Optional[int] = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
lowerCamelCase : str = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase : Optional[int] = (32, 64, 128, 256)
lowerCamelCase : List[str] = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
lowerCamelCase : int = torch.load(F'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
lowerCamelCase : List[Any] = model.state_dict()
lowerCamelCase : Optional[Any] = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 65536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
lowerCamelCase : Dict = UNet1DModel(**UpperCamelCase__ )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
lowerCamelCase : List[str] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase : Union[str, Any] = state_dict.pop(UpperCamelCase__ )
hf_value_function.load_state_dict(UpperCamelCase__ )
torch.save(hf_value_function.state_dict() , F'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
with open(F'hub/hopper-medium-v2/unet/hor{hor}/config.json' , """w""" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def snake_case ( ) -> List[Any]:
lowerCamelCase : str = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 65536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
lowerCamelCase : Union[str, Any] = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
lowerCamelCase : str = model
lowerCamelCase : Any = UNet1DModel(**UpperCamelCase__ )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
lowerCamelCase : Dict = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase : List[Any] = state_dict.pop(UpperCamelCase__ )
hf_value_function.load_state_dict(UpperCamelCase__ )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
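# After conversion, the saved weights can be reloaded with diffusers (hedged sketch;
# the local paths match the `torch.save` calls above):
#
# from diffusers import UNet1DModel
#
# unet = UNet1DModel.from_pretrained("hub/hopper-medium-v2/unet/hor32")
# value_function = UNet1DModel.from_pretrained("hub/hopper-medium-v2/value_function")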
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Dict = np.asarray(weights[0] )
lowerCamelCase : List[Any] = np.asarray(weights[1] )
lowerCamelCase : List[str] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Tuple = np.asarray(weights[0] )
lowerCamelCase : Any = np.asarray(weights[1] )
lowerCamelCase : List[Any] = np.asarray(weights[2] )
lowerCamelCase : List[str] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]:
# layernorm 1
lowerCamelCase : str = weights[0][0][0]
lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] )
lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# lsh weights + output
lowerCamelCase : List[Any] = weights[0][1]
if len(UpperCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
else:
set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
# intermediate weights
lowerCamelCase : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCamelCase__ ) == 4:
lowerCamelCase : Dict = intermediate_weights[2]
# layernorm 2
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] )
lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# intermediate dense
lowerCamelCase : Optional[Any] = np.asarray(intermediate_weights[1][0] )
lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# intermediate out
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] )
lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]:
# reformer model
lowerCamelCase : List[Any] = torch_model.reformer
# word embeds
lowerCamelCase : Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , )
if isinstance(weights[3] , UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) )
lowerCamelCase : int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# output layer norm
lowerCamelCase : Any = np.asarray(weights[7][0] )
lowerCamelCase : List[str] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# output embeddings
lowerCamelCase : List[Any] = np.asarray(weights[9][0] )
lowerCamelCase : Optional[int] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]:
# Initialise PyTorch model
lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ )
with open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""]
set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase :Optional[int] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
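# Example invocation (illustrative; the script filename and paths are placeholders,
# matching the required arguments defined above):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/trax_weights.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin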
"""simple docstring"""
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> int:
while second != 0:
lowerCamelCase : int = first & second
first ^= second
lowerCamelCase : List[str] = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase :Tuple = int(input('Enter the first number: ').strip())
__lowerCamelCase :List[Any] = int(input('Enter the second number: ').strip())
print(f"""{add(first, second) = }""")
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A__ ( nn.Module):
"""simple docstring"""
def __init__( self: Dict )-> Dict:
super().__init__()
lowerCamelCase : Tuple = nn.Linear(3 , 4 )
lowerCamelCase : Optional[Any] = nn.BatchNorm1d(4 )
lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 )
def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple:
return (args[0] + 1,) + args[1:], kwargs
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]:
return output + 1
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Dict = ModelHook()
add_hook_to_module(__a , __a )
self.assertEqual(test_model._hf_hook , __a )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Union[str, Any] = ModelHook()
add_hook_to_module(__a , __a )
add_hook_to_module(__a , __a , append=__a )
self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: List[Any] )-> List[str]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Union[str, Any] = test_model(x + 1 )
lowerCamelCase : Optional[int] = test_model(x + 2 )
lowerCamelCase : List[Any] = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[int] = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# Attaching a hook to a model that already has one replaces the existing hook; it does not chain them
lowerCamelCase : Dict = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
assert torch.allclose(__a , __a , atol=1e-5 )
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : List[str] = torch.randn(2 , 3 )
lowerCamelCase : int = test_model(__a )
lowerCamelCase : Dict = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model that already has one replaces the existing hook; it does not chain them
lowerCamelCase : str = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
assert torch.allclose(__a , output + 2 , atol=1e-5 )
def a__ ( self: int )-> Dict:
lowerCamelCase : List[Any] = ModelForTest()
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : List[str] = test_model(__a )
lowerCamelCase : Any = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = test_model(__a )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCamelCase : str = torch.randn(2 , 3 )
lowerCamelCase : Dict = model(__a )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
lowerCamelCase : str = model(__a )
self.assertEqual(output.device , torch.device(0 ) )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
lowerCamelCase : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : int = torch.randn(2 , 3 )
lowerCamelCase : Optional[int] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__a , execution_device=__a , offload=__a )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Tuple = torch.randn(2 , 3 )
lowerCamelCase : Any = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
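# Minimal sketch of the hook API exercised above (hedged; the layer and device
# values are illustrative):
#
# import torch.nn as nn
# from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_module
#
# layer = nn.Linear(3, 4)
# add_hook_to_module(layer, AlignDevicesHook(execution_device="cpu"))
# # forward passes now move inputs/weights to the execution device
# remove_hook_from_module(layer)  # restores the original forward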
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] =['''transformers''', '''torch''', '''note_seq''']
def __init__( self: Union[str, Any] , *__a: Tuple , **__a: Any )-> Dict:
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def a__ ( cls: Optional[Any] , *__a: Tuple , **__a: Dict )-> Tuple:
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def a__ ( cls: List[Any] , *__a: Optional[int] , **__a: List[Any] )-> Optional[int]:
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
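# Note: this dummy class stands in for the real pipeline class when the
# `transformers`/`torch`/`note_seq` backends are unavailable; any attempt to use it
# raises an informative error via `requires_backends` instead of failing at import time.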
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase :Optional[Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class A__ :
"""simple docstring"""
@staticmethod
def a__ ( *__a: List[str] , **__a: Optional[Any] )-> Dict:
pass
def snake_case ( UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__lowerCamelCase :List[str] = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Tuple =MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def a__ ( self: str , __a: Optional[Any] , __a: Tuple , __a: List[str] )-> int:
lowerCamelCase : Optional[Any] = pipeline(
"""document-question-answering""" , model=__a , tokenizer=__a , image_processor=__a )
lowerCamelCase : List[Any] = INVOICE_URL
lowerCamelCase : str = list(zip(*apply_tesseract(load_image(__a ) , __a , """""" ) ) )
lowerCamelCase : Optional[Any] = """What is the placebo?"""
lowerCamelCase : Tuple = [
{
"""image""": load_image(__a ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
def a__ ( self: List[Any] , __a: List[str] , __a: Tuple )-> str:
lowerCamelCase : Optional[int] = dqa_pipeline(__a , top_k=2 )
self.assertEqual(
__a , [
[
{"""score""": ANY(__a ), """answer""": ANY(__a ), """start""": ANY(__a ), """end""": ANY(__a )},
{"""score""": ANY(__a ), """answer""": ANY(__a ), """start""": ANY(__a ), """end""": ANY(__a )},
]
]
* 3 , )
@require_torch
@require_detectron2
@require_pytesseract
def a__ ( self: Optional[int] )-> Any:
lowerCamelCase : List[str] = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
lowerCamelCase : Optional[int] = INVOICE_URL
lowerCamelCase : Optional[int] = """How many cats are there?"""
lowerCamelCase : Any = [
{"""score""": 0.00_01, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
{"""score""": 0.00_01, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
]
lowerCamelCase : Optional[int] = dqa_pipeline(image=__a , question=__a , top_k=2 )
self.assertEqual(nested_simplify(__a , decimals=4 ) , __a )
lowerCamelCase : Union[str, Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(__a , decimals=4 ) , __a )
# No text is detected in this image, so layoutlmv2 should fail.
# We expect an empty answer.
lowerCamelCase : Union[str, Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowerCamelCase : Tuple = dqa_pipeline(image=__a , question=__a , top_k=2 )
self.assertEqual(__a , [] )
# We can optionally pass the words and bounding boxes directly
lowerCamelCase : List[str] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowerCamelCase : str = []
lowerCamelCase : int = []
lowerCamelCase : Dict = dqa_pipeline(image=__a , question=__a , words=__a , boxes=__a , top_k=2 )
self.assertEqual(__a , [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def a__ ( self: Union[str, Any] )-> Optional[int]:
lowerCamelCase : Tuple = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
lowerCamelCase : Optional[Any] = INVOICE_URL
lowerCamelCase : Union[str, Any] = """What is the invoice number?"""
lowerCamelCase : Dict = dqa_pipeline(image=__a , question=__a , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"""score""": 0.99_44, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.00_09, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
lowerCamelCase : Optional[Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"""score""": 0.99_44, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.00_09, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
lowerCamelCase : Tuple = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"""score""": 0.99_44, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.00_09, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Optional[Any] = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
lowerCamelCase : Union[str, Any] = INVOICE_URL
lowerCamelCase : Any = """What is the invoice number?"""
lowerCamelCase : Any = dqa_pipeline(image=__a , question=__a , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"""score""": 0.99_74, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.99_48, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
lowerCamelCase : int = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"""score""": 0.99_74, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.99_48, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
lowerCamelCase : Optional[Any] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"""score""": 0.99_74, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.99_48, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : str = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=__a )
lowerCamelCase : str = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=__a , revision="""3dc6de3""" , )
lowerCamelCase : Tuple = INVOICE_URL
lowerCamelCase : int = """What is the invoice number?"""
lowerCamelCase : List[str] = dqa_pipeline(image=__a , question=__a , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"""score""": 0.42_51, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.08_19, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
lowerCamelCase : Optional[int] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"""score""": 0.42_51, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.08_19, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
lowerCamelCase : Union[str, Any] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"""score""": 0.42_51, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.08_19, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
lowerCamelCase : str = list(zip(*apply_tesseract(load_image(__a ) , __a , """""" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase : Optional[Any] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"""score""": 0.42_51, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.08_19, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a__ ( self: str )-> Dict:
lowerCamelCase : int = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=__a )
lowerCamelCase : int = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=__a , revision="""3dc6de3""" , max_seq_len=50 , )
lowerCamelCase : int = INVOICE_URL
lowerCamelCase : int = """What is the invoice number?"""
lowerCamelCase : List[Any] = dqa_pipeline(image=__a , question=__a , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"""score""": 0.99_99, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.99_98, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
lowerCamelCase : str = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"""score""": 0.99_99, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.99_98, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
lowerCamelCase : List[str] = list(zip(*apply_tesseract(load_image(__a ) , __a , """""" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase : Dict = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"""score""": 0.99_99, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.99_98, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
lowerCamelCase : str = INVOICE_URL
lowerCamelCase : Any = """What is the invoice number?"""
lowerCamelCase : Tuple = dqa_pipeline(image=__a , question=__a , top_k=2 )
self.assertEqual(nested_simplify(__a , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def a__ ( self: Optional[Any] )-> Tuple:
pass
| 42
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> Optional[int]:
lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
lowerCamelCase : Dict = ConvNextModelTester(self )
| 42
| 1
|
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    # Plain linear scan of array[left:right]; returns -1 if target is absent.
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    # Iterative ternary search; falls back to lin_search on small ranges.
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    # Recursive ternary search over array[left:right + 1].
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
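# Illustrative usage (a sketch, not part of the original module). Because the
# demo list below is shorter than `precision`, both entry points fall back to
# `lin_search` immediately; the ternary partitioning only engages on ranges of
# at least `precision` elements.
#
#     >>> demo = [1, 3, 5, 7, 9, 11]
#     >>> ite_ternary_search(demo, 7)
#     3
#     >>> rec_ternary_search(0, len(demo) - 1, demo, 7)
#     3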
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase :int = input('Enter numbers separated by comma:\n').strip()
__lowerCamelCase :Dict = [int(item.strip()) for item in user_input.split(',')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
__lowerCamelCase :Any = int(input('Enter the number to be found in the list:\n').strip())
__lowerCamelCase :Union[str, Any] = ite_ternary_search(collection, target)
__lowerCamelCase :int = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print('Not found')
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13_353_718,
        searcher_beam_size=5_000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
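# Minimal usage sketch (not from the original file; it follows the standard
# PretrainedConfig workflow shared by all such config classes):
#
#     config = RealmConfig(num_candidates=4)
#     config.retriever_proj_size        # -> 128 (default)
#     config.save_pretrained("./realm-config")  # writes config.json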
| 42
| 1
|
"""simple docstring"""
def get_demo_graph(index: int) -> dict[int, list[int]]:
    # Returns one of four small undirected demo graphs, selected by index.
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
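# Illustrative results (a sketch, not from the original file; the raw output
# order depends on DFS traversal). Graph 0 above has three bridges -- the
# edges joining the triangle {0, 1, 2} to the 3-4 chain and to the 5-6-7-8
# cycle -- while graph 3 is biconnected and has none:
#
#     >>> sorted(compute_bridges(get_demo_graph(0)))
#     [(2, 3), (2, 5), (3, 4)]
#     >>> compute_bridges(get_demo_graph(3))
#     []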
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
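# Usage sketch (not from the original file): instantiate with a custom
# maximum depth, e.g. for outdoor KITTI-style scenes, and inspect a default.
#
#     config = GLPNConfig(max_depth=80)
#     config.hidden_sizes   # -> [32, 64, 160, 256]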
| 42
| 1
|
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    # Decode raw audio bytes to a mono float32 waveform via an ffmpeg subprocess.
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
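# Sketch of typical usage (the file name is illustrative, not from this
# module):
#
#     with open("sample.flac", "rb") as f:
#         waveform = ffmpeg_read(f.read(), sampling_rate=16_000)
#     # `waveform` is a mono float32 numpy array resampled to 16 kHz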
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    # Yield raw microphone bytes in chunks of `chunk_length_s` seconds.
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    # Stream overlapping microphone chunks as numpy arrays, skipping chunks
    # when the consumer falls too far behind real time.
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    # Re-chunk a byte iterator into windows of `chunk_len` bytes, carrying
    # `stride` (left/right overlap) metadata on every yielded item.
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]

    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
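# Worked example (a sketch, not part of the original module): chunking ten
# bytes into 4-byte windows with a (1, 1) stride. Each yielded dict records
# how many bytes on each side are overlap, so consumers can drop them.
#
#     >>> list(chunk_bytes_iter(iter([b"0123456789"]), 4, stride=(1, 1)))
#     [{'raw': b'0123', 'stride': (0, 1)},
#      {'raw': b'2345', 'stride': (1, 1)},
#      {'raw': b'4567', 'stride': (1, 1)},
#      {'raw': b'6789', 'stride': (1, 1)},
#      {'raw': b'89', 'stride': (1, 0)}]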
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    # Internal helper: run ffmpeg and yield its stdout in `buflen`-byte reads.
    bufsize = 2**24  # 16MB
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 42
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    # Compute the product u * (u - 1) * ... * (u - p + 1).
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
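# For example (illustrative, not in the original source):
#     ucal(1.5, 3) == 1.5 * (1.5 - 1) * (1.5 - 2) == -0.375
# i.e. the coefficient that multiplies the third forward difference in the
# Newton forward interpolation sum computed in main() below.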
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 42
| 1
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> List[Any]:
lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__a )
lowerCamelCase : str = -1
lowerCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
lowerCamelCase : int = model.generate(__a , max_new_tokens=10 , do_sample=__a )
lowerCamelCase : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase : Tuple = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase : Any = cs.out[:-1]
self.assertEqual(__a , __a )
def a__ ( self: str )-> Optional[int]:
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCamelCase : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__a )
lowerCamelCase : str = -1
lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
lowerCamelCase : Any = model.generate(__a , max_new_tokens=10 , do_sample=__a )
lowerCamelCase : List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase : int = TextIteratorStreamer(__a )
lowerCamelCase : Optional[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
lowerCamelCase : List[str] = Thread(target=model.generate , kwargs=__a )
thread.start()
lowerCamelCase : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def a__ ( self: Optional[int] )-> Optional[Any]:
lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__a )
lowerCamelCase : Optional[Any] = -1
lowerCamelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
lowerCamelCase : int = model.generate(__a , max_new_tokens=10 , do_sample=__a )
lowerCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase : str = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase : str = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase : List[str] = cs.out[:-1]
self.assertEqual(__a , __a )
def a__ ( self: List[str] )-> str:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(__a )
lowerCamelCase : Optional[int] = -1
lowerCamelCase : List[Any] = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase : Dict = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase : Union[str, Any] = cs.out[:-1] # Remove the final "\n"
lowerCamelCase : str = tokenizer(__a , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a__ ( self: int )-> List[str]:
lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__a )
lowerCamelCase : Optional[Any] = -1
lowerCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
lowerCamelCase : Optional[int] = TextIteratorStreamer(__a , timeout=0.0_01 )
lowerCamelCase : int = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
lowerCamelCase : Tuple = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
lowerCamelCase : Optional[int] = """"""
for new_text in streamer:
streamer_text += new_text
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
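# Usage sketch (illustrative; assumes sentencepiece is installed and that the
# checkpoint id below exists on the Hub). Thanks to the _LazyModule
# indirection above, the tokenizer module is only imported on first access:
#
#     from transformers import GPTSw3Tokenizer
#     tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")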
| 42
| 1
|
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    """simple docstring"""

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we do not assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
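# Worked example (a sketch, matching the driver code below): with three
# persons whose allowed tasks are {1, 3, 4}, {1, 2, 5} and {3, 4}, there are
# 10 ways to give every person a distinct task, so the program prints 10.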
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 42
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
    names_to_config = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
args = parser.parse_args()
pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
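# Example invocation (illustrative; the script filename is an assumption --
# this is the LeViT timm-to-PyTorch conversion script from transformers):
#
#     python convert_levit_timm_to_pytorch.py \
#         --model_name levit-128S \
#         --pytorch_dump_folder_path levit-dump-folder/ \
#         --no-push_to_hub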
| 42
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
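# Usage sketch (illustrative; assumes a PIL image is at hand and the
# checkpoint above is downloadable). PipelineTool instances are callable and
# chain encode -> forward -> decode, so end to end this looks like:
#
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(image, "What is on the table?")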
| 42
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple =(KDPMaDiscreteScheduler,)
snake_case__ : Tuple =10
def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]:
lowerCamelCase : int = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**__a )
return config
def a__ ( self: Union[str, Any] )-> Any:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__a )
def a__ ( self: str )-> int:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def a__ ( self: int )-> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def a__ ( self: List[Any] )-> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase : List[str] = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase : List[str] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : Dict = self.dummy_model()
lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : List[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : Optional[Any] = output.prev_sample
lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def a__ ( self: Any )-> Any:
if torch_device == "mps":
return
lowerCamelCase : Dict = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config()
lowerCamelCase : int = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : List[Any] = self.dummy_model()
lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : Optional[int] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[Any] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : str = output.prev_sample
lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def a__ ( self: Optional[Any] )-> List[Any]:
if torch_device == "mps":
return
lowerCamelCase : Any = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase : Optional[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
lowerCamelCase : Union[str, Any] = self.dummy_model()
lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : int = scheduler.step(__a , __a , __a )
lowerCamelCase : int = output.prev_sample
lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
lowerCamelCase : int = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
| 42
| 1
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class A__ :
"""simple docstring"""
snake_case__ : Optional[Any] =None
def a__ ( self: List[Any] )-> int:
lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase : int = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __a )
def a__ ( self: Optional[Any] )-> int:
lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : str = os.path.join(__a , """feat_extract.json""" )
feat_extract_first.to_json_file(__a )
lowerCamelCase : Optional[Any] = self.feature_extraction_class.from_json_file(__a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def a__ ( self: Union[str, Any] )-> List[str]:
lowerCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : Any = feat_extract_first.save_pretrained(__a )[0]
check_json_file_has_correct_format(__a )
lowerCamelCase : Optional[int] = self.feature_extraction_class.from_pretrained(__a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def a__ ( self: int )-> List[str]:
lowerCamelCase : Tuple = self.feature_extraction_class()
self.assertIsNotNone(__a )
| 42
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =StableDiffusionXLImgaImgPipeline
snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ ( self: List[str] )-> int:
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase : Any = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
lowerCamelCase : Dict = CLIPTextModel(__a )
lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase : Any = image / 2 + 0.5
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Dict = torch.manual_seed(__a )
else:
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ ( self: Optional[Any] )-> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self: List[str] )-> Optional[Any]:
pass
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : str = sd_pipe.to(__a )
lowerCamelCase : Any = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
# forward without prompt embeds
lowerCamelCase : Dict = self.get_dummy_inputs(__a )
lowerCamelCase : Any = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Optional[int] = negative_prompt
lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
lowerCamelCase : List[Any] = sd_pipe(**__a )
lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
(
(
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) ,
) : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a )
lowerCamelCase : int = sd_pipe(
**__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]:
lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_inputs(__a )
lowerCamelCase : Optional[Any] = pipe(**__a ).images
lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
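The slow test above seeds both NumPy and torch so the same latents and sampling noise are produced on every run. A minimal sketch of that seeding pattern, with a hypothetical helper name:

import numpy as np
import torch

def make_reproducible_inputs(seed: int, device: str = "cpu", dtype=torch.float32):
    # The torch.Generator drives the scheduler's sampling steps, while the
    # NumPy RNG fixes the initial latents independently of the device.
    generator = torch.Generator(device=device).manual_seed(seed)
    latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
    return generator, torch.from_numpy(latents).to(device=device, dtype=dtype)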
| 42
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :List[str] = logging.get_logger(__name__)
__lowerCamelCase :Optional[Any] = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''visual_bert'''
def __init__( self: Optional[Any] , __a: Union[str, Any]=30_522 , __a: Tuple=768 , __a: Dict=512 , __a: Dict=12 , __a: Any=12 , __a: Optional[Any]=3_072 , __a: Tuple="gelu" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[int]=512 , __a: str=2 , __a: List[Any]=0.02 , __a: List[Any]=1e-1_2 , __a: List[str]=False , __a: Dict=True , __a: Optional[int]=1 , __a: str=0 , __a: Dict=2 , **__a: Union[str, Any] , )-> Optional[int]:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
lowerCamelCase : List[Any] = vocab_size
lowerCamelCase : Tuple = max_position_embeddings
lowerCamelCase : Optional[int] = hidden_size
lowerCamelCase : Optional[int] = visual_embedding_dim
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Dict = intermediate_size
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Dict = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Optional[Any] = type_vocab_size
lowerCamelCase : str = layer_norm_eps
lowerCamelCase : List[Any] = bypass_transformer
lowerCamelCase : int = special_visual_initialize
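As a quick sanity check of the config above, the standard PretrainedConfig round-trip can be exercised directly; a minimal sketch, assuming the usual to_dict/from_dict API:

from transformers import VisualBertConfig

config = VisualBertConfig(visual_embedding_dim=512, bypass_transformer=False)
restored = VisualBertConfig.from_dict(config.to_dict())
assert restored.visual_embedding_dim == 512
assert restored.model_type == "visual_bert"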
| 42
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Tuple , __a: Optional[int] )-> List[str]:
return None
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Tuple , __a: str , __a: str , __a: str )-> Tuple:
return None
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =[
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self: Optional[Any] )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """tf""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: str )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """pt""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: Union[str, Any] )-> Dict:
from transformers import BertModel
lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__a ) )
vocab_file.flush()
lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
model.save_pretrained(__a )
self._test_export(__a , """pt""" , 12 , __a )
@require_tf
@slow
def a__ ( self: Optional[Any] )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
lowerCamelCase : Tuple = quantize(Path(__a ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def a__ ( self: Any )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
lowerCamelCase : Dict = quantize(__a )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any:
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__a , __a , __a , __a , __a , **__a )
return path
except Exception as e:
self.fail(__a )
@require_torch
@require_tokenizers
@slow
def a__ ( self: Tuple )-> Dict:
from transformers import BertModel
lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """pt""" )
@require_tf
@require_tokenizers
@slow
def a__ ( self: Optional[Any] )-> List[Any]:
from transformers import TFBertModel
lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """tf""" )
def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]:
lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a )
lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a )
# Assert all variables are present
self.assertEqual(len(__a ) , len(__a ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __a )
self.assertSequenceEqual(variable_names[3:] , __a )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def a__ ( self: List[Any] )-> int:
lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__a ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__a ) , set(__a ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__a ) , 1 )
self.assertEqual(len(__a ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
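The ensure_valid_input tests above rely on one contract: keep only the arguments that the model's forward signature declares, in declaration order, and stop at the first one the tokenizer did not produce. A minimal re-implementation sketch of that contract (not the library code):

import inspect

def reorder_inputs(forward_fn, tokens: dict):
    ordered_names, ordered_values = [], []
    for name in inspect.signature(forward_fn).parameters:
        if name == "self":
            continue
        if name not in tokens:
            break  # e.g. GPT-2's "past" interleaves with the provided args
        ordered_names.append(name)
        ordered_values.append(tokens[name])
    return ordered_names, tuple(ordered_values)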
| 42
| 1
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
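The shim above follows a common pattern: keep the old import path alive but emit a warning pointing at the new one. A generic analogue of diffusers' deprecate helper (names illustrative):

import warnings

def warn_deprecated_import(old_path: str, new_path: str, remove_in: str) -> None:
    warnings.warn(
        f"Importing from {old_path} is deprecated and will be removed in "
        f"{remove_in}; import from {new_path} instead.",
        FutureWarning,
        stacklevel=3,  # point the warning at the caller's import site
    )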
| 42
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Optional[int] )-> Union[str, Any]:
lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60]
lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
lowerCamelCase : Union[str, Any] = 100
self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 )
def a__ ( self: str )-> str:
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def a__ ( self: str )-> List[Any]:
self.assertRaisesRegex(__a , """Weight can not be negative.""" )
def a__ ( self: Any )-> Dict:
self.assertRaisesRegex(__a , """Profit can not be negative.""" )
def a__ ( self: Optional[Any] )-> List[Any]:
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def a__ ( self: Optional[Any] )-> Tuple:
self.assertRaisesRegex(
__a , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
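For reference, a fractional-greedy calc_profit consistent with these tests: every item above has a profit/weight ratio of 5 and the weights sum to 42 <= 100, so all items fit and the expected profit is 210. This is a sketch of the algorithm under test, not necessarily the upstream implementation (the error messages mirror the regexes the tests expect, wording included):

def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    # Greedy: take items by descending profit-per-unit-weight, splitting
    # the last item if it does not fit entirely.
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if w <= capacity:
            total, capacity = total + p, capacity - w
        else:
            total += p * capacity / w
            break
    return total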
| 42
| 1
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: List[str] )-> str:
lowerCamelCase : Dict = 0
lowerCamelCase : str = [0]
lowerCamelCase : Union[str, Any] = [0]
lowerCamelCase : Tuple = len(__a )
self.assertEqual(k.knapsack(__a , __a , __a , __a ) , 0 )
lowerCamelCase : str = [60]
lowerCamelCase : str = [10]
lowerCamelCase : str = len(__a )
self.assertEqual(k.knapsack(__a , __a , __a , __a ) , 0 )
def a__ ( self: List[Any] )-> int:
lowerCamelCase : str = 3
lowerCamelCase : List[str] = [1, 2, 3]
lowerCamelCase : List[Any] = [3, 2, 1]
lowerCamelCase : Tuple = len(__a )
self.assertEqual(k.knapsack(__a , __a , __a , __a ) , 5 )
def a__ ( self: int )-> str:
lowerCamelCase : List[Any] = 50
lowerCamelCase : Tuple = [60, 100, 120]
lowerCamelCase : Optional[Any] = [10, 20, 30]
lowerCamelCase : List[str] = len(__a )
self.assertEqual(k.knapsack(__a , __a , __a , __a ) , 220 )
if __name__ == "__main__":
unittest.main()
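A standard recursive 0/1 knapsack satisfies all three test cases above (0 for empty capacity, 5 for capacity 3, 220 for capacity 50); a sketch, assuming the upstream argument order (capacity, weights, values, counter):

def knapsack(capacity: int, weights: list, values: list, counter: int) -> int:
    # Base case: no items left or no remaining capacity.
    if counter == 0 or capacity == 0:
        return 0
    # Item too heavy: it can only be skipped.
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the item.
    return max(
        values[counter - 1]
        + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )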
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor']
__lowerCamelCase :List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
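What _LazyModule automates above can be hand-rolled with PEP 562's module-level __getattr__: resolve a public name to its submodule only on first access. A minimal sketch meant to live in a package __init__ (the import map is illustrative):

import importlib

_IMPORT_MAP = {
    ".processing_owlvit": ["OwlViTProcessor"],
    ".configuration_owlvit": ["OwlViTConfig", "OwlViTTextConfig"],
}

def __getattr__(name):
    for submodule, exports in _IMPORT_MAP.items():
        if name in exports:
            return getattr(importlib.import_module(submodule, __package__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")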
| 42
| 1
|
"""simple docstring"""
import copy
import re
class A__ :
"""simple docstring"""
snake_case__ : Optional[Any] ='''hp'''
snake_case__ : Optional[Any] ={}
snake_case__ : int =None
@classmethod
def a__ ( cls: int , __a: int , __a: str )-> List[str]:
lowerCamelCase : Dict = prefix
lowerCamelCase : Union[str, Any] = defaults
cls.build_naming_info()
@staticmethod
def a__ ( __a: Union[str, Any] , __a: List[str] )-> Optional[int]:
if len(__a ) == 0:
return ""
lowerCamelCase : Tuple = None
if any(char.isdigit() for char in word ):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__a ) + 1 ):
lowerCamelCase : Tuple = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
lowerCamelCase : Any = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__a: Union[str, Any] ):
lowerCamelCase : List[str] = """"""
while integer != 0:
lowerCamelCase : List[Any] = chr(ord("""A""" ) + integer % 10 ) + s
integer //= 10
return s
lowerCamelCase : Tuple = 0
while True:
lowerCamelCase : str = word + """#""" + int_to_alphabetic(__a )
if sword in info["reverse_short_word"]:
continue
else:
lowerCamelCase : List[Any] = sword
break
lowerCamelCase : Dict = short_word
lowerCamelCase : Dict = word
return short_word
@staticmethod
def a__ ( __a: List[str] , __a: Optional[int] )-> str:
lowerCamelCase : Tuple = param_name.split("""_""" )
lowerCamelCase : Dict = [TrialShortNamer.shortname_for_word(__a , __a ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
lowerCamelCase : List[str] = ["""""", """_"""]
for separator in separators:
lowerCamelCase : Any = separator.join(__a )
if shortname not in info["reverse_short_param"]:
lowerCamelCase : List[str] = shortname
lowerCamelCase : int = param_name
return shortname
return param_name
@staticmethod
def a__ ( __a: Any , __a: Dict )-> List[str]:
lowerCamelCase : Dict = TrialShortNamer.shortname_for_key(__a , __a )
lowerCamelCase : Optional[int] = short_name
lowerCamelCase : Dict = param_name
@classmethod
def a__ ( cls: int )-> Optional[int]:
if cls.NAMING_INFO is not None:
return
lowerCamelCase : Dict = {
"""short_word""": {},
"""reverse_short_word""": {},
"""short_param""": {},
"""reverse_short_param""": {},
}
lowerCamelCase : int = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__a , __a )
lowerCamelCase : int = info
@classmethod
def a__ ( cls: Tuple , __a: Union[str, Any] )-> str:
cls.build_naming_info()
assert cls.PREFIX is not None
lowerCamelCase : Union[str, Any] = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
lowerCamelCase : Union[str, Any] = cls.NAMING_INFO["""short_param"""][k]
if isinstance(__a , __a ):
lowerCamelCase : Optional[int] = 1 if v else 0
lowerCamelCase : Tuple = """""" if isinstance(__a , (int, float) ) else """-"""
lowerCamelCase : List[str] = f'{key}{sep}{v}'
name.append(__a )
return "_".join(__a )
@classmethod
def a__ ( cls: Any , __a: Optional[Any] )-> List[str]:
lowerCamelCase : int = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
lowerCamelCase : Dict = []
else:
lowerCamelCase : List[str] = repr.split("""_""" )
lowerCamelCase : List[str] = {}
for value in values:
if "-" in value:
lowerCamelCase , lowerCamelCase : Any = value.split("""-""" )
else:
lowerCamelCase : str = re.sub("""[0-9.]""" , """""" , __a )
lowerCamelCase : Dict = float(re.sub("""[^0-9.]""" , """""" , __a ) )
lowerCamelCase : Union[str, Any] = cls.NAMING_INFO["""reverse_short_param"""][p_k]
lowerCamelCase : str = p_v
for k in cls.DEFAULTS:
if k not in parameters:
lowerCamelCase : int = cls.DEFAULTS[k]
return parameters
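The core idea behind the word-shortening above is "shortest prefix not yet claimed by another word". A self-contained sketch of just that step:

def shortest_unique_prefixes(words: list) -> dict:
    taken, result = set(), {}
    for word in words:
        for i in range(1, len(word) + 1):
            prefix = word[:i]
            if prefix not in taken:  # first free prefix wins
                taken.add(prefix)
                result[word] = prefix
                break
    return result

# {'learning': 'l', 'layers': 'la', 'batch': 'b'}
print(shortest_unique_prefixes(["learning", "layers", "batch"]))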
| 42
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
lowerCamelCase : Dict = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Union[str, Any] = image_size
lowerCamelCase : Optional[int] = patch_size
lowerCamelCase : Any = num_channels
lowerCamelCase : Any = embed_dim
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : List[Any] = depths
lowerCamelCase : Tuple = num_heads
lowerCamelCase : List[Any] = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : str = qkv_bias
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Tuple = drop_path_rate
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Tuple = use_absolute_embeddings
lowerCamelCase : List[str] = patch_norm
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : str = initializer_range
lowerCamelCase : Tuple = is_training
lowerCamelCase : int = scope
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : str = encoder_stride
lowerCamelCase : List[str] = out_features
lowerCamelCase : Optional[int] = out_indices
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def a__ ( self: List[Any] )-> Optional[int]:
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = model(__a )
lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase : Dict = None
lowerCamelCase : Dict = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
lowerCamelCase : Optional[Any] = self.type_sequence_label_size
lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self: int )-> Optional[int]:
lowerCamelCase : str = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Tuple =False
snake_case__ : Dict =False
snake_case__ : Dict =False
snake_case__ : Tuple =False
snake_case__ : Optional[int] =False
def a__ ( self: Union[str, Any] )-> Optional[int]:
lowerCamelCase : List[str] = FocalNetModelTester(self )
lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )
def a__ ( self: List[str] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: List[str] )-> Union[str, Any]:
return
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[Any] )-> Dict:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : int = model_class(__a )
lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[str] = outputs.hidden_states
lowerCamelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self: Any )-> Any:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : List[str] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : str = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def a__ ( self: Optional[int] )-> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> Any:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = _config_zero_init(__a )
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Optional[int] )-> Optional[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a )
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
snake_case__ : Optional[int] =FocalNetConfig
snake_case__ : str =False
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : str = FocalNetModelTester(self )
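A quick arithmetic check of the shape formula used by create_and_check_model above, plugged with the tester's defaults (image_size=32, patch_size=2, embed_dim=16, three stages):

image_size, patch_size, embed_dim, num_stages = 32, 2, 16, 3
# Sequence length shrinks 4x per downsampling stage after patchification.
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (num_stages - 1))
# Channel width doubles per stage.
expected_dim = embed_dim * 2 ** (num_stages - 1)
assert (expected_seq_len, expected_dim) == (16, 64)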
| 42
| 1
|
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class A__ :
"""simple docstring"""
@property
def a__ ( self: int )-> List[Any]:
return self.get_dummy_input()
@property
def a__ ( self: Tuple )-> Union[str, Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
def a__ ( self: Union[str, Any] , __a: Any=True , __a: Optional[int]=False , __a: Dict=False , __a: List[str]=False , )-> Union[str, Any]:
lowerCamelCase : Tuple = 4
lowerCamelCase : Optional[int] = 32
lowerCamelCase : Union[str, Any] = (32, 32)
lowerCamelCase : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase : List[str] = torch.device(__a )
lowerCamelCase : Dict = (batch_size, num_channels) + sizes
lowerCamelCase : int = randn_tensor(__a , generator=__a , device=__a )
lowerCamelCase : List[Any] = {"""hidden_states""": hidden_states}
if include_temb:
lowerCamelCase : Dict = 128
lowerCamelCase : Union[str, Any] = randn_tensor((batch_size, temb_channels) , generator=__a , device=__a )
if include_res_hidden_states_tuple:
lowerCamelCase : List[str] = torch.manual_seed(1 )
lowerCamelCase : Any = (randn_tensor(__a , generator=__a , device=__a ),)
if include_encoder_hidden_states:
lowerCamelCase : Optional[int] = floats_tensor((batch_size, 32, 32) ).to(__a )
if include_skip_sample:
lowerCamelCase : Tuple = randn_tensor(((batch_size, 3) + sizes) , generator=__a , device=__a )
return dummy_input
def a__ ( self: Dict )-> Dict:
lowerCamelCase : int = {
"""in_channels""": 32,
"""out_channels""": 32,
"""temb_channels""": 128,
}
if self.block_type == "up":
lowerCamelCase : Any = 32
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
lowerCamelCase : Dict = self.dummy_input
return init_dict, inputs_dict
def a__ ( self: str , __a: Dict )-> Any:
lowerCamelCase , lowerCamelCase : str = self.prepare_init_args_and_inputs_for_common()
lowerCamelCase : str = self.block_class(**__a )
unet_block.to(__a )
unet_block.eval()
with torch.no_grad():
lowerCamelCase : Optional[int] = unet_block(**__a )
if isinstance(__a , __a ):
lowerCamelCase : Any = output[0]
self.assertEqual(output.shape , self.output_shape )
lowerCamelCase : Dict = output[0, -1, -3:, -3:]
lowerCamelCase : Dict = torch.tensor(__a ).to(__a )
assert torch_all_close(output_slice.flatten() , __a , atol=5e-3 )
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" )
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
lowerCamelCase : str = self.block_class(**__a )
model.to(__a )
model.train()
lowerCamelCase : List[str] = model(**__a )
if isinstance(__a , __a ):
lowerCamelCase : Any = output[0]
lowerCamelCase : Union[str, Any] = torch.device(__a )
lowerCamelCase : List[str] = randn_tensor(output.shape , device=__a )
lowerCamelCase : str = torch.nn.functional.mse_loss(__a , __a )
loss.backward()
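Distilled, the training test above is a smoke test: one forward pass, an MSE loss against random targets, one backward pass. A generic sketch of that pattern:

import torch

def training_smoke_test(model: torch.nn.Module, inputs: dict) -> None:
    model.train()
    output = model(**inputs)
    if isinstance(output, tuple):  # some blocks return tuples
        output = output[0]
    target = torch.randn_like(output)
    loss = torch.nn.functional.mse_loss(output, target)
    loss.backward()  # fails loudly if the graph was detached somewhere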
| 42
|
"""simple docstring"""
import os
def snake_case ( ) -> Optional[Any]:
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
lowerCamelCase : int = [] # noqa: E741
for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
lowerCamelCase : Union[str, Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase : Optional[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase : List[str] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase : List[Any] = temp
return maximum
if __name__ == "__main__":
print(solution())
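The four hand-written loops above can be collapsed into one scan over direction vectors; an equivalent formulation (same answer, same complexity):

def max_product(grid, run=4):
    n = len(grid)
    best = 0
    # right, down, down-right diagonal, down-left diagonal
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(n):
            for j in range(n):
                ei, ej = i + (run - 1) * di, j + (run - 1) * dj
                if 0 <= ei < n and 0 <= ej < n:
                    p = 1
                    for k in range(run):
                        p *= grid[i + k * di][j + k * dj]
                    best = max(best, p)
    return best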
| 42
| 1
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__lowerCamelCase :int = get_tests_dir('fixtures')
__lowerCamelCase :List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
__lowerCamelCase :str = get_tests_dir('fixtures/dummy-config.json')
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: int )-> Dict:
lowerCamelCase : str = 0
def a__ ( self: Union[str, Any] )-> str:
lowerCamelCase : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__a , __a )
def a__ ( self: Tuple )-> List[str]:
lowerCamelCase : Any = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def a__ ( self: List[Any] )-> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
lowerCamelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
config_dict.pop("""feature_extractor_type""" )
lowerCamelCase : str = WavaVecaFeatureExtractor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
lowerCamelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
lowerCamelCase : Any = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def a__ ( self: List[str] )-> str:
lowerCamelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def a__ ( self: Any )-> Tuple:
with self.assertRaisesRegex(
__a , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCamelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def a__ ( self: Optional[Any] )-> int:
with self.assertRaisesRegex(
__a , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCamelCase : str = AutoFeatureExtractor.from_pretrained(__a , revision="""aaaaaa""" )
def a__ ( self: List[str] )-> Tuple:
with self.assertRaisesRegex(
__a , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCamelCase : int = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def a__ ( self: Union[str, Any] )-> List[Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
lowerCamelCase : int = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
lowerCamelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__a )
lowerCamelCase : Any = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
lowerCamelCase : Tuple = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def a__ ( self: int )-> Optional[Any]:
try:
AutoConfig.register("""custom""" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase : Tuple = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
lowerCamelCase : List[str] = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def a__ ( self: int )-> int:
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] =True
try:
AutoConfig.register("""custom""" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
lowerCamelCase : int = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
lowerCamelCase : Tuple = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
lowerCamelCase : List[str] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(__a , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 42
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
snake_case__ : Any =False
snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ ( cls: Dict )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Union[str, Any] )-> Any:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: Tuple )-> Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCamelCase : Optional[int] = CLIPTextModel(__a )
lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Tuple = torch.manual_seed(__a )
else:
lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Dict = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def a__ ( self: Dict )-> str:
lowerCamelCase : Tuple = """cpu"""
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : List[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Any = self.get_dummy_inputs(__a )
lowerCamelCase : Union[str, Any] = pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase : Optional[Any] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def a__ ( self: int )-> Optional[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def a__ ( self: Union[str, Any] )-> Optional[int]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self: Tuple )-> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def a__ ( self: Dict )-> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def a__ ( self: Optional[int] )-> Dict:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def a__ ( self: Any )-> Tuple:
super().test_save_load_local(expected_max_difference=5e-4 )
def a__ ( self: str )-> str:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
@classmethod
def a__ ( cls: Any )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Dict )-> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: int )-> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = torch.manual_seed(51 )
lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowerCamelCase : Dict = """a painting of an elephant with glasses"""
lowerCamelCase : Any = [5, 7]
lowerCamelCase : Tuple = pipe(
prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowerCamelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
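Both test classes above wrap every test in deterministic kernels via class-level hooks; the pattern, extracted into a reusable mixin:

import torch
import unittest

class DeterministicTestMixin(unittest.TestCase):
    # Force deterministic kernels for the whole class, then restore.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)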
| 42
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Tuple =CTRLTokenizer
snake_case__ : List[Any] =False
snake_case__ : Optional[Any] =False
def a__ ( self: str )-> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase : str = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
lowerCamelCase : int = dict(zip(__a , range(len(__a ) ) ) )
lowerCamelCase : Union[str, Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
lowerCamelCase : Optional[Any] = {"""unk_token""": """<unk>"""}
lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def a__ ( self: str , **__a: str )-> Optional[int]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__a )
def a__ ( self: Any , __a: Any )-> Optional[int]:
lowerCamelCase : Optional[Any] = """adapt react readapt apt"""
lowerCamelCase : List[Any] = """adapt react readapt apt"""
return input_text, output_text
def a__ ( self: Optional[int] )-> Optional[Any]:
lowerCamelCase : Any = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase : Union[str, Any] = """adapt react readapt apt"""
lowerCamelCase : str = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
lowerCamelCase : Dict = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase : Optional[Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
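The expected tokenization in the full-tokenizer test falls out of greedy BPE over the fixture merges: "readapt" becomes ["re", "adapt"], rendered as "re@@ adapt" once continuation markers are added. A minimal BPE applier sketch:

def bpe(word: str, merges: list) -> list:
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    ranks = {tuple(m.split()): i for i, m in enumerate(merges)}
    while True:
        # Find the highest-priority (lowest-rank) adjacent pair present.
        pairs = [(ranks[p], i) for i, p in enumerate(zip(symbols, symbols[1:])) if p in ranks]
        if not pairs:
            return symbols
        _, i = min(pairs)
        symbols[i : i + 2] = ["".join(symbols[i : i + 2])]

merges = ["a p", "ap t</w>", "r e", "a d", "ad apt</w>"]
print(bpe("readapt", merges))  # ['re', 'adapt</w>']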
| 42
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
"""simple docstring"""
def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : Any = seq_length
lowerCamelCase : Any = is_training
lowerCamelCase : Tuple = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Union[str, Any] = hidden_dropout_prob
lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : List[Any] = type_sequence_label_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Union[str, Any] = num_labels
lowerCamelCase : Optional[Any] = num_choices
lowerCamelCase : Any = scope
def a__ ( self: Optional[int] )-> List[Any]:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Dict = None
if self.use_input_mask:
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Any = None
lowerCamelCase : int = None
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Tuple )-> Union[str, Any]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
lowerCamelCase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a )
lowerCamelCase : str = model(__a )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
lowerCamelCase : str = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Any = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =False
snake_case__ : Dict =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =()
snake_case__ : Optional[int] =(
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any =True
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Optional[Any] = EsmModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: List[Any] )-> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Any )-> List[Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> List[str]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Any = EsmEmbeddings(config=__a )
lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
lowerCamelCase : List[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Any )-> Optional[Any]:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Dict )-> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: List[str] )-> Dict:
pass
@require_torch
class A__ ( __lowercase):
"""simple docstring"""
@slow
def a__ ( self: Any )-> Union[str, Any]:
with torch.no_grad():
lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Tuple = model(__a )[0]
lowerCamelCase : Dict = 33
lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
lowerCamelCase : Tuple = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def a__ ( self: Dict )-> str:
with torch.no_grad():
lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase : Any = model(__a )[0]
# compare the actual values for a slice.
lowerCamelCase : Tuple = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
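# A minimal, self-contained sketch of the position-id rule exercised in the
# two embedding tests above: non-padding tokens are numbered padding_idx + 1,
# padding_idx + 2, ..., while padding positions keep padding_idx itself.
# The helper name `position_ids_from_input_ids_sketch` is hypothetical.
import torch

def position_ids_from_input_ids_sketch(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    # cumulative count of real tokens, zeroed out again at padding positions
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

assert position_ids_from_input_ids_sketch(torch.tensor([[12, 31, 13, 1]]), 1).tolist() == [[2, 3, 4, 1]]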
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase :int = 10
def snake_case ( UpperCamelCase__ : list[int] ) -> list[int]:
lowerCamelCase : int = 1
lowerCamelCase : Union[str, Any] = max(UpperCamelCase__ )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase : list[list] = [[] for _ in range(UpperCamelCase__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase : Any = (i // placement) % RADIX
buckets[tmp].append(UpperCamelCase__ )
# put each bucket's contents back into list_of_ints
lowerCamelCase : Dict = 0
for b in range(UpperCamelCase__ ):
for i in buckets[b]:
lowerCamelCase : List[str] = i
a += 1
# move to the next digit place
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
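# A minimal usage sketch of the LSD radix sort above, restated with
# hypothetical names so it is self-contained (base RADIX = 10, as above):
def radix_sort_sketch(values: list[int]) -> list[int]:
    placement = 1
    while values and placement <= max(values):
        buckets: list[list[int]] = [[] for _ in range(10)]
        for value in values:
            buckets[(value // placement) % 10].append(value)  # bucket by current digit
        values = [v for bucket in buckets for v in bucket]
        placement *= 10
    return values

assert radix_sort_sketch([170, 45, 75, 90, 2, 802, 24, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]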
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AlbertTokenizer
snake_case__ : Optional[Any] =AlbertTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : Any =True
snake_case__ : Optional[int] =True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Tuple )-> List[Any]:
lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
lowerCamelCase : str = AlbertTokenizer(__a )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[int] , __a: str , __a: Union[str, Any]=13 , __a: str=30 , __a: int=2 , __a: Any=3 , __a: Optional[int]=True , __a: Union[str, Any]=True , __a: Optional[int]=32 , __a: Any=5 , __a: Optional[Any]=4 , __a: Optional[int]=37 , __a: Optional[int]="gelu" , __a: Optional[Any]=0.1 , __a: Dict=0.1 , __a: Any=10 , __a: Union[str, Any]=0.02 , __a: str=3 , __a: Dict=0.6 , __a: Optional[Any]=None , )-> Union[str, Any]:
lowerCamelCase : str = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Optional[Any] = image_size
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Union[str, Any] = num_channels
lowerCamelCase : Optional[Any] = is_training
lowerCamelCase : Optional[int] = use_labels
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : Dict = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Tuple = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : List[Any] = hidden_dropout_prob
lowerCamelCase : str = attention_probs_dropout_prob
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : int = initializer_range
lowerCamelCase : Any = mask_ratio
lowerCamelCase : Optional[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
# (we add 1 for the [CLS] token)
lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2
lowerCamelCase : Tuple = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self: Optional[Any] )-> Union[str, Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def a__ ( self: Any , __a: Dict , __a: Any , __a: int )-> Union[str, Any]:
lowerCamelCase : Any = ViTMAEModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[int] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self: Union[str, Any] , __a: Any , __a: Union[str, Any] , __a: Optional[Any] )-> Tuple:
lowerCamelCase : str = ViTMAEForPreTraining(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
lowerCamelCase : Dict = (self.image_size // self.patch_size) ** 2
lowerCamelCase : str = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : str = ViTMAEForPreTraining(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : List[str] = model(__a )
lowerCamelCase : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def a__ ( self: Union[str, Any] )-> List[Any]:
lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : int = config_and_inputs
lowerCamelCase : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
snake_case__ : Union[str, Any] ={'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
snake_case__ : int =False
snake_case__ : str =False
snake_case__ : str =False
snake_case__ : Union[str, Any] =False
def a__ ( self: Optional[Any] )-> str:
lowerCamelCase : List[Any] = ViTMAEModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Union[str, Any] )-> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def a__ ( self: Any )-> Optional[Any]:
pass
def a__ ( self: Optional[Any] )-> Optional[Any]:
lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def a__ ( self: int )-> str:
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__a )
lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : List[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: int )-> Dict:
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Optional[Any]:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__a )
def a__ ( self: str , __a: Union[str, Any] , __a: List[str] , __a: List[str] )-> Any:
# make masks reproducible
np.random.seed(2 )
lowerCamelCase : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowerCamelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase : Optional[int] = torch.from_numpy(__a )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase : Any = pt_noise
super().check_pt_tf_models(__a , __a , __a )
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
model.to(__a )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : Dict = outputs[0].cpu().numpy()
lowerCamelCase : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
lowerCamelCase : Dict = model_class.from_pretrained(__a )
model.to(__a )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__a , __a ) )
# Make sure we don't have nans
lowerCamelCase : List[str] = after_outputs[0].cpu().numpy()
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : List[str] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__a , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def a__ ( self: Union[str, Any] )-> Union[str, Any]:
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def a__ ( self: str )-> Dict:
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def a__ ( self: List[str] )-> Tuple:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def a__ ( self: Optional[int] )-> int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: Optional[Any] )-> int:
pass
@slow
def a__ ( self: str )-> Any:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = ViTMAEModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> List[str]:
lowerCamelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Any )-> int:
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def a__ ( self: Optional[int] )-> Tuple:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCamelCase : int = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(__a )
lowerCamelCase : Tuple = self.default_image_processor
lowerCamelCase : Dict = prepare_img()
lowerCamelCase : List[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase : Tuple = ViTMAEConfig()
lowerCamelCase : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase : Union[str, Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowerCamelCase : Union[str, Any] = model(**__a , noise=torch.from_numpy(__a ).to(device=__a ) )
# verify the logits
lowerCamelCase : int = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : str = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__a ) , atol=1e-4 ) )
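# A worked example of the masked sequence-length arithmetic used by the
# model tester above, plugging in the tester's defaults (image_size=30,
# patch_size=2, mask_ratio=0.6):
import math

num_patches = (30 // 2) ** 2                                 # 225 patches
seq_length = int(math.ceil((1 - 0.6) * (num_patches + 1)))   # keep ~40%, incl. [CLS]
assert (num_patches, seq_length) == (225, 91)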
"""simple docstring"""
__lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : Tuple = True
lowerCamelCase : Any = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
order.append(UpperCamelCase__ )
return order
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : List[Any] = True
lowerCamelCase : int = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return component
def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]:
lowerCamelCase : int = len(UpperCamelCase__ ) * [False]
lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(UpperCamelCase__ )
lowerCamelCase : int = []
for i, was_visited in enumerate(UpperCamelCase__ ):
if not was_visited:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Tuple = []
lowerCamelCase : str = len(UpperCamelCase__ ) * [False]
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1]
if not visited[vert]:
lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
components_list.append(UpperCamelCase__ )
return components_list
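# A minimal, self-contained sketch of the same two-pass (Kosaraju) scheme
# implemented above, with hypothetical names; applied to the first sample
# graph it yields [[0, 1, 2], [3], [4]]:
from collections import defaultdict

def kosaraju_sketch(graph: dict[int, list[int]]) -> list[list[int]]:
    visited = [False] * len(graph)
    order: list[int] = []

    def record_finish_order(vert: int) -> None:
        visited[vert] = True
        for neighbour in graph[vert]:
            if not visited[neighbour]:
                record_finish_order(neighbour)
        order.append(vert)

    for vert in range(len(graph)):
        if not visited[vert]:
            record_finish_order(vert)

    reversed_graph: dict[int, list[int]] = defaultdict(list)
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    visited = [False] * len(graph)
    components: list[list[int]] = []

    def collect(vert: int, component: list[int]) -> None:
        visited[vert] = True
        component.append(vert)
        for neighbour in reversed_graph[vert]:
            if not visited[neighbour]:
                collect(neighbour, component)

    for vert in reversed(order):
        if not visited[vert]:
            component: list[int] = []
            collect(vert, component)
            components.append(component)
    return components

assert kosaraju_sketch({0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}) == [[0, 1, 2], [3], [4]]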
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowerCamelCase :Any = None
__lowerCamelCase :List[Any] = logging.get_logger(__name__)
__lowerCamelCase :int = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCamelCase :int = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
__lowerCamelCase :Optional[Any] = {
'google/rembert': 256,
}
__lowerCamelCase :Tuple = '▁'
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] =VOCAB_FILES_NAMES
snake_case__ : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[int] =RemBertTokenizer
def __init__( self: Union[str, Any] , __a: Tuple=None , __a: Any=None , __a: str=True , __a: List[str]=True , __a: str=False , __a: Tuple="[CLS]" , __a: Optional[Any]="[SEP]" , __a: Union[str, Any]="<unk>" , __a: List[str]="[SEP]" , __a: str="<pad>" , __a: str="[CLS]" , __a: List[str]="[MASK]" , **__a: Dict , )-> Optional[int]:
# Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase : Dict = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , **__a , )
lowerCamelCase : Optional[Any] = do_lower_case
lowerCamelCase : str = remove_space
lowerCamelCase : Any = keep_accents
lowerCamelCase : Optional[Any] = vocab_file
lowerCamelCase : Optional[int] = bool(self.vocab_file )
def a__ ( self: str , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
lowerCamelCase : Optional[Any] = [self.sep_token_id]
lowerCamelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ ( self: Optional[Any] , __a: List[int] , __a: Optional[List[int]] = None , __a: bool = False )-> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1]
def a__ ( self: Dict , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
lowerCamelCase : Union[str, Any] = [self.sep_token_id]
lowerCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self: List[str] , __a: str , __a: Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(__a ):
logger.error("""Vocabulary path ({}) should be a directory""".format(__a ) )
return
lowerCamelCase : Optional[Any] = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
return (out_vocab_file,)
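# A small worked example of the special-token layout the three methods above
# implement (toy ids are hypothetical: cls=101, sep=102):
cls, sep = [101], [102]
seq_a, seq_b = [5, 6, 7], [8, 9]
assert cls + seq_a + sep == [101, 5, 6, 7, 102]                           # single sequence
assert cls + seq_a + sep + seq_b + sep == [101, 5, 6, 7, 102, 8, 9, 102]  # sequence pair
token_type_ids = len(cls + seq_a + sep) * [0] + len(seq_b + sep) * [1]
assert token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]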
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] ='''time_series_transformer'''
snake_case__ : List[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
# time series specific configuration
lowerCamelCase : str = prediction_length
lowerCamelCase : Optional[Any] = context_length or prediction_length
lowerCamelCase : Tuple = distribution_output
lowerCamelCase : Any = loss
lowerCamelCase : List[Any] = input_size
lowerCamelCase : int = num_time_features
lowerCamelCase : Dict = lags_sequence
lowerCamelCase : Optional[int] = scaling
lowerCamelCase : int = num_dynamic_real_features
lowerCamelCase : Tuple = num_static_real_features
lowerCamelCase : Any = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : int = cardinality
else:
lowerCamelCase : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = embedding_dimension
else:
lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features
lowerCamelCase : List[str] = d_model
lowerCamelCase : Tuple = encoder_attention_heads
lowerCamelCase : Optional[int] = decoder_attention_heads
lowerCamelCase : Union[str, Any] = encoder_ffn_dim
lowerCamelCase : str = decoder_ffn_dim
lowerCamelCase : str = encoder_layers
lowerCamelCase : Any = decoder_layers
lowerCamelCase : Optional[int] = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Tuple = activation_dropout
lowerCamelCase : Optional[int] = encoder_layerdrop
lowerCamelCase : int = decoder_layerdrop
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : Optional[Any] = init_std
lowerCamelCase : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=__a , **__a )
@property
def a__ ( self: int )-> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
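# A worked example of the feature-size arithmetic above (values are
# hypothetical): input_size=1, seven lags, two time features, one
# categorical feature embedded in 2 dimensions, no other real features.
input_size, lags_sequence, num_time_features, embedding_dimension = 1, [1, 2, 3, 4, 5, 6, 7], 2, [2]
number_of_features = sum(embedding_dimension) + 0 + num_time_features + 0 + input_size * 2  # loc/scale add input_size * 2
d_model_input = input_size * len(lags_sequence) + number_of_features
assert d_model_input == 13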
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def snake_case ( UpperCamelCase__ : int ) -> list[str]:
lowerCamelCase : List[Any] = []
lowerCamelCase : str = 11
lowerCamelCase : int = int("""1""" + """0""" * digit_len )
for num in range(UpperCamelCase__ , UpperCamelCase__ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(UpperCamelCase__ , UpperCamelCase__ ):
solutions.append(F'{num}/{den}' )
den += 1
num += 1
lowerCamelCase : List[Any] = 10
return solutions
def snake_case ( UpperCamelCase__ : int = 2 ) -> int:
lowerCamelCase : Union[str, Any] = 1.0
for fraction in fraction_list(UpperCamelCase__ ):
lowerCamelCase : Dict = Fraction(UpperCamelCase__ )
result *= frac.denominator / frac.numerator
return int(UpperCamelCase__ )
if __name__ == "__main__":
print(solution())
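# A quick sanity check for the search above: the four non-trivial two-digit
# digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98, whose
# product reduces to 1/100, so `solution()` should print 100.
from fractions import Fraction
from functools import reduce

product = reduce(lambda acc, f: acc * f, [Fraction(16, 64), Fraction(19, 95), Fraction(26, 65), Fraction(49, 98)])
assert product == Fraction(1, 100)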
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[int] ='''philschmid/bart-large-cnn-samsum'''
snake_case__ : Optional[int] =(
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
snake_case__ : Optional[Any] ='''summarizer'''
snake_case__ : str =AutoTokenizer
snake_case__ : Tuple =AutoModelForSeqaSeqLM
snake_case__ : List[str] =['''text''']
snake_case__ : Dict =['''text''']
def a__ ( self: List[str] , __a: Any )-> str:
return self.pre_processor(__a , return_tensors="""pt""" , truncation=__a )
def a__ ( self: Tuple , __a: Optional[int] )-> List[Any]:
return self.model.generate(**__a )[0]
def a__ ( self: List[str] , __a: str )-> int:
return self.pre_processor.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
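# A minimal usage sketch of the same encode -> generate -> decode flow the
# tool implements, using the checkpoint named above (requires downloading
# the model; variable names are hypothetical):
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("philschmid/bart-large-cnn-samsum")
model = AutoModelForSeq2SeqLM.from_pretrained("philschmid/bart-large-cnn-samsum")
inputs = tokenizer("A long English text to summarize ...", return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs)[0]
print(tokenizer.decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True))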
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Dict = np.asarray(weights[0] )
lowerCamelCase : List[Any] = np.asarray(weights[1] )
lowerCamelCase : List[str] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
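# A small shape check for the transpose/contiguous/view pattern used above
# when folding per-head weights back into a single matrix (sizes here are
# hypothetical: 2 heads, head_dim 4, hidden 8):
import torch

w = torch.randn(2, 8, 4)                           # (heads, hidden, head_dim)
flat = w.transpose(1, 2).contiguous().view(-1, 8)  # -> (heads * head_dim, hidden)
assert flat.shape == (8, 8)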
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Tuple = np.asarray(weights[0] )
lowerCamelCase : Any = np.asarray(weights[1] )
lowerCamelCase : List[Any] = np.asarray(weights[2] )
lowerCamelCase : List[str] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]:
# layernorm 1
lowerCamelCase : str = weights[0][0][0]
lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] )
lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# lsh weights + output
lowerCamelCase : List[Any] = weights[0][1]
if len(UpperCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
else:
set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
# intermediate weights
lowerCamelCase : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCamelCase__ ) == 4:
lowerCamelCase : Dict = intermediate_weights[2]
# layernorm 2
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] )
lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# intermediate dense
lowerCamelCase : Optional[Any] = np.asarray(intermediate_weights[1][0] )
lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# intermediate out
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] )
lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]:
# reformer model
lowerCamelCase : List[Any] = torch_model.reformer
# word embeds
lowerCamelCase : Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , )
if isinstance(weights[3] , UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) )
lowerCamelCase : int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# output layer norm
lowerCamelCase : Any = np.asarray(weights[7][0] )
lowerCamelCase : List[str] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# output embeddings
lowerCamelCase : List[Any] = np.asarray(weights[9][0] )
lowerCamelCase : Optional[int] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]:
# Initialise PyTorch model
lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ )
with open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""]
set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the trax model pickle (.pkl) file.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase :Optional[int] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Union[str, Any] )-> Optional[int]:
lowerCamelCase : Union[str, Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : List[Any] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps""", """test_metrics.py"""] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase : Dict = test_metrics
@require_cpu
def a__ ( self: Tuple )-> Optional[int]:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def a__ ( self: int )-> List[str]:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def a__ ( self: Dict )-> Dict:
self.test_metrics.main()
@require_multi_gpu
def a__ ( self: str )-> str:
print(f'Found {torch.cuda.device_count()} devices.' )
lowerCamelCase : Optional[int] = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
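# A minimal sketch of what `patch_environment` above does: it sets the
# (upper-cased) variables for the duration of the block and removes them on
# exit (this assumes OMP_NUM_THREADS was not already set beforehand):
import os
from accelerate.utils import patch_environment

with patch_environment(omp_num_threads=1):
    assert os.environ["OMP_NUM_THREADS"] == "1"
assert "OMP_NUM_THREADS" not in os.environ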
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A__ ( nn.Module):
"""simple docstring"""
def __init__( self: Dict )-> Dict:
super().__init__()
lowerCamelCase : Tuple = nn.Linear(3 , 4 )
lowerCamelCase : Optional[Any] = nn.BatchNormad(4 )
lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 )
def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple:
return (args[0] + 1,) + args[1:], kwargs
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]:
return output + 1
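# A minimal, self-contained sketch of attaching one of these hooks with
# `add_hook_to_module` (the class name `AddOnePreHook` is hypothetical; it
# restates the pre-forward hook above, applied to an identity module):
import torch
from torch import nn
from accelerate.hooks import ModelHook, add_hook_to_module

class AddOnePreHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs

layer = nn.Identity()
add_hook_to_module(layer, AddOnePreHook())
assert torch.equal(layer(torch.zeros(2)), torch.ones(2))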
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Dict = ModelHook()
add_hook_to_module(__a , __a )
self.assertEqual(test_model._hf_hook , __a )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Union[str, Any] = ModelHook()
add_hook_to_module(__a , __a )
add_hook_to_module(__a , __a , append=__a )
self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: List[Any] )-> List[str]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Union[str, Any] = test_model(x + 1 )
lowerCamelCase : Optional[int] = test_model(x + 2 )
lowerCamelCase : List[Any] = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[int] = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# Attaching a hook to a model that already has one replaces the old hook; it does not chain them
lowerCamelCase : Dict = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
assert torch.allclose(__a , __a , atol=1e-5 )
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : List[str] = torch.randn(2 , 3 )
lowerCamelCase : int = test_model(__a )
lowerCamelCase : Dict = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model that already has one replaces the old hook; it does not chain them
lowerCamelCase : str = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
assert torch.allclose(__a , output + 2 , atol=1e-5 )
def a__ ( self: int )-> Dict:
lowerCamelCase : List[Any] = ModelForTest()
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : List[str] = test_model(__a )
lowerCamelCase : Any = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = test_model(__a )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCamelCase : str = torch.randn(2 , 3 )
lowerCamelCase : Dict = model(__a )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
lowerCamelCase : str = model(__a )
self.assertEqual(output.device , torch.device(0 ) )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule to a different device
lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
lowerCamelCase : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : int = torch.randn(2 , 3 )
lowerCamelCase : Optional[int] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule to a different device
lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__a , execution_device=__a , offload=__a )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so they stay on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
# Parameters have been offloaded, so they are on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
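# Same scenario once more, but this time the weights are offloaded into an
# explicit weights_map (here the model's own state_dict) instead of being
# managed implicitly by the hook.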
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Attach hooks recursively that offload the parameters and set the execution device
lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
# Parameters have been offloaded, so they are on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so they stay on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
# Parameters have been offloaded, so they are on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Tuple = torch.randn(2 , 3 )
lowerCamelCase : Any = model(__a )
self.assertEqual(output.device , __a )
# Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def snake_case ( UpperCamelCase__ : int = 8 ) -> str:
lowerCamelCase : Any = ascii_letters + digits + punctuation
return "".join(secrets.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int ) -> str:
# Alternative password generator: mixes the caller-supplied characters with
# random letters, digits and punctuation, then shuffles the result
i -= len(UpperCamelCase__ )
lowerCamelCase : str = i // 3
lowerCamelCase : Dict = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowerCamelCase : Any = (
chars_incl
+ random(UpperCamelCase__ , quotient + remainder )
+ random(UpperCamelCase__ , UpperCamelCase__ )
+ random(UpperCamelCase__ , UpperCamelCase__ )
)
lowerCamelCase : int = list(UpperCamelCase__ )
shuffle(UpperCamelCase__ )
return "".join(UpperCamelCase__ )
# random is a generalised function for letters, characters and numbers
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int ) -> str:
return "".join(secrets.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Dict:
pass # Put your code here...
def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : List[str] ) -> Optional[Any]:
pass # Put your code here...
def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ) -> Tuple:
pass # Put your code here...
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int = 8 ) -> bool:
if len(UpperCamelCase__ ) < min_length:
# Your Password must be at least 8 characters long
return False
lowerCamelCase : Optional[int] = any(char in ascii_uppercase for char in password )
lowerCamelCase : int = any(char in ascii_lowercase for char in password )
lowerCamelCase : Any = any(char in digits for char in password )
lowerCamelCase : Any = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain uppercase and lowercase letters,
# numbers, and special characters
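# Example: 'Hw@o9pwd' passes every check (uppercase, lowercase, a digit
# and '@'), while 'password' fails all of them except the lowercase one.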
def snake_case ( ) -> Any:
lowerCamelCase : int = int(input("""Please indicate the max length of your password: """ ).strip() )
lowerCamelCase : Optional[int] = input(
"""Please indicate the characters that must be in your password: """ ).strip()
print("""Password generated:""" , password_generator(UpperCamelCase__ ) )
print(
"""Alternative Password generated:""" , alternative_password_generator(UpperCamelCase__ , UpperCamelCase__ ) , )
print("""[If you are thinking of using this passsword, You better save it.]""" )
if __name__ == "__main__":
main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase :Optional[Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
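# Under TYPE_CHECKING the symbols are imported eagerly for static analysis;
# at runtime _LazyModule defers the actual (torch-dependent) imports until
# an attribute is first accessed.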
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase :List[Any] = {'vocab_file': 'vocab.txt'}
__lowerCamelCase :Dict = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__lowerCamelCase :Union[str, Any] = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__lowerCamelCase :str = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Dict =VOCAB_FILES_NAMES
snake_case__ : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[Any] =PRETRAINED_INIT_CONFIGURATION
snake_case__ : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[int] =ConvBertTokenizer
def __init__( self: Optional[Any] , __a: Optional[int]=None , __a: List[str]=None , __a: Optional[Any]=True , __a: List[Any]="[UNK]" , __a: Optional[Any]="[SEP]" , __a: Optional[Any]="[PAD]" , __a: int="[CLS]" , __a: str="[MASK]" , __a: Tuple=True , __a: Optional[int]=None , **__a: Any , )-> Optional[int]:
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , )
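# If the serialized fast tokenizer was saved with different normalizer
# options, rebuild its normalizer so it matches the arguments passed here.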
lowerCamelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , __a ) != do_lower_case
or normalizer_state.get("""strip_accents""" , __a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , __a ) != tokenize_chinese_chars
):
lowerCamelCase : List[str] = getattr(__a , normalizer_state.pop("""type""" ) )
lowerCamelCase : Tuple = do_lower_case
lowerCamelCase : List[Any] = strip_accents
lowerCamelCase : List[Any] = tokenize_chinese_chars
lowerCamelCase : List[str] = normalizer_class(**__a )
lowerCamelCase : Any = do_lower_case
def a__ ( self: Union[str, Any] , __a: str , __a: Optional[Any]=None )-> Any:
lowerCamelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
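# Token type ids follow the BERT convention: 0 for the first sequence
# (including [CLS] and its [SEP]) and 1 for the optional second sequence
# and its trailing [SEP].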
def a__ ( self: Any , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
lowerCamelCase : Optional[int] = [self.sep_token_id]
lowerCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self: int , __a: str , __a: Optional[str] = None )-> Tuple[str]:
lowerCamelCase : List[Any] = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict, so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> Optional[int]:
lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
lowerCamelCase : Dict = ConvNextModelTester(self )
"""simple docstring"""
import json
import sys
def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Tuple:
with open(UpperCamelCase__ , encoding="""utf-8""" ) as f:
lowerCamelCase : str = json.load(UpperCamelCase__ )
lowerCamelCase : str = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(UpperCamelCase__ ):
lowerCamelCase : List[str] = results[benchmark_name]
lowerCamelCase : Union[str, Any] = benchmark_name.split("""/""" )[-1]
output_md.append(F'### Benchmark: {benchmark_file_name}' )
lowerCamelCase : Any = """| metric |"""
lowerCamelCase : Tuple = """|--------|"""
lowerCamelCase : Any = """| new / old (diff) |"""
for metric_name in sorted(UpperCamelCase__ ):
lowerCamelCase : Tuple = benchmark_res[metric_name]
lowerCamelCase : Optional[int] = metric_vals["""new"""]
lowerCamelCase : Dict = metric_vals.get("""old""" , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = metric_vals.get("""diff""" , UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = F' {new_val:f}' if isinstance(UpperCamelCase__ , (int, float) ) else """None"""
if old_val is not None:
val_str += F' / {old_val:f}' if isinstance(UpperCamelCase__ , (int, float) ) else "None"
if dif_val is not None:
val_str += F' ({dif_val:f})' if isinstance(UpperCamelCase__ , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(UpperCamelCase__ ) )
if __name__ == "__main__":
__lowerCamelCase :Dict = sys.argv[1]
__lowerCamelCase :List[str] = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] ='''realm'''
def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
# Common config
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Dict = retriever_proj_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Tuple = num_candidates
lowerCamelCase : int = intermediate_size
lowerCamelCase : Dict = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : Optional[Any] = layer_norm_eps
# Reader config
lowerCamelCase : List[str] = span_hidden_size
lowerCamelCase : Dict = max_span_width
lowerCamelCase : Optional[Any] = reader_layer_norm_eps
lowerCamelCase : Optional[int] = reader_beam_size
lowerCamelCase : List[Any] = reader_seq_len
# Retrieval config
lowerCamelCase : int = num_block_records
lowerCamelCase : Dict = searcher_beam_size
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__lowerCamelCase :Dict = logging.get_logger(__name__)
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Dict =['''input_features''', '''attention_mask''']
def __init__( self: Optional[Any] , __a: Optional[Any]=80 , __a: Any=16_000 , __a: List[str]=0.0 , __a: Any=10 , __a: List[Any]=25 , __a: Union[str, Any]="hamming_window" , __a: Optional[int]=3_27_68.0 , __a: Optional[Any]=0.97 , __a: List[str]=1.0 , __a: Optional[int]=True , __a: Union[str, Any]=True , __a: Tuple=False , **__a: Tuple , )-> Optional[int]:
super().__init__(feature_size=__a , sampling_rate=__a , padding_value=__a , **__a )
lowerCamelCase : Union[str, Any] = feature_size
lowerCamelCase : Dict = sampling_rate
lowerCamelCase : int = padding_value
lowerCamelCase : Tuple = hop_length
lowerCamelCase : List[Any] = win_length
lowerCamelCase : Dict = frame_signal_scale
lowerCamelCase : Union[str, Any] = preemphasis_coeff
lowerCamelCase : Tuple = mel_floor
lowerCamelCase : Union[str, Any] = normalize_means
lowerCamelCase : Tuple = normalize_vars
lowerCamelCase : Dict = win_function
lowerCamelCase : Dict = return_attention_mask
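# win_length and hop_length are given in milliseconds; convert them to
# sample counts for the STFT below.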
lowerCamelCase : Any = win_length * sampling_rate // 1_000
lowerCamelCase : Any = hop_length * sampling_rate // 1_000
lowerCamelCase : Dict = optimal_fft_length(self.sample_size )
lowerCamelCase : Any = (self.n_fft // 2) + 1
def a__ ( self: int , __a: np.array )-> np.ndarray:
if self.win_function == "hamming_window":
lowerCamelCase : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__a )
else:
lowerCamelCase : Union[str, Any] = window_function(window_length=self.sample_size , name=self.win_function )
lowerCamelCase : List[str] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
lowerCamelCase : Dict = spectrogram(
one_waveform * self.frame_signal_scale , window=__a , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__a , preemphasis=self.preemphasis_coeff , mel_filters=__a , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
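# Per-utterance mean/variance normalization (CMVN): statistics are computed
# over the unpadded frames only, and padded positions are refilled afterwards.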
def a__ ( self: Optional[int] , __a: Optional[Any] , __a: Tuple , __a: List[Any] )-> List[str]:
# make sure we normalize float32 arrays
if self.normalize_means:
lowerCamelCase : Optional[int] = x[:input_length].mean(axis=0 )
lowerCamelCase : Union[str, Any] = np.subtract(__a , __a )
if self.normalize_vars:
lowerCamelCase : int = x[:input_length].std(axis=0 )
lowerCamelCase : List[str] = np.divide(__a , __a )
if input_length < x.shape[0]:
lowerCamelCase : List[Any] = padding_value
# make sure array is in float32
lowerCamelCase : Optional[Any] = x.astype(np.floataa )
return x
def a__ ( self: Union[str, Any] , __a: List[np.ndarray] , __a: Optional[np.ndarray] = None )-> List[np.ndarray]:
lowerCamelCase : Union[str, Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__a , __a , self.padding_value ) for x, n in zip(__a , __a )]
def __call__( self: int , __a: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a: Union[bool, str, PaddingStrategy] = False , __a: Optional[int] = None , __a: bool = False , __a: Optional[int] = None , __a: Optional[bool] = None , __a: Optional[Union[str, TensorType]] = None , __a: Optional[int] = None , **__a: Any , )-> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCamelCase : str = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowerCamelCase : int = is_batched_numpy or (
isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase : str = [np.asarray(__a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__a , np.ndarray ):
lowerCamelCase : str = np.asarray(__a , dtype=np.floataa )
elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase : List[str] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase : str = [raw_speech]
# extract fbank features
lowerCamelCase : Optional[int] = [self._extract_mfsc_features(__a ) for one_waveform in raw_speech]
# convert into correct format for padding
lowerCamelCase : Dict = BatchFeature({"""input_features""": features} )
lowerCamelCase : Dict = self.pad(
__a , padding=__a , max_length=__a , truncation=__a , pad_to_multiple_of=__a , return_attention_mask=__a , **__a , )
# make sure list is in array format
lowerCamelCase : int = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __a ):
lowerCamelCase : Optional[Any] = [np.asarray(__a , dtype=np.floataa ) for feature in input_features]
lowerCamelCase : Optional[int] = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
lowerCamelCase : Optional[Any] = [np.asarray(__a , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowerCamelCase : Tuple = (
np.array(__a , dtype=np.intaa )
if self._get_padding_strategies(__a , max_length=__a ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowerCamelCase : Union[str, Any] = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__a )
if return_tensors is not None:
lowerCamelCase : Optional[int] = padded_inputs.convert_to_tensors(__a )
return padded_inputs
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''glpn'''
def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
super().__init__(**__a )
lowerCamelCase : Dict = num_channels
lowerCamelCase : Any = num_encoder_blocks
lowerCamelCase : Dict = depths
lowerCamelCase : List[str] = sr_ratios
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : Optional[Any] = mlp_ratios
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[Any] = decoder_hidden_size
lowerCamelCase : Tuple = max_depth
lowerCamelCase : Optional[Any] = head_in_index
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : UNetaDModel
snake_case__ : ScoreSdeVeScheduler
def __init__( self: Any , __a: UNetaDModel , __a: ScoreSdeVeScheduler )-> Dict:
super().__init__()
self.register_modules(unet=__a , scheduler=__a )
@torch.no_grad()
def __call__( self: List[str] , __a: int = 1 , __a: int = 2_000 , __a: Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a: Optional[str] = "pil" , __a: bool = True , **__a: Any , )-> Union[ImagePipelineOutput, Tuple]:
lowerCamelCase : str = self.unet.config.sample_size
lowerCamelCase : List[Any] = (batch_size, 3, img_size, img_size)
lowerCamelCase : str = self.unet
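# Start from pure Gaussian noise scaled by the scheduler's initial sigma.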
lowerCamelCase : Any = randn_tensor(__a , generator=__a ) * self.scheduler.init_noise_sigma
lowerCamelCase : Any = sample.to(self.device )
self.scheduler.set_timesteps(__a )
self.scheduler.set_sigmas(__a )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowerCamelCase : int = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowerCamelCase : Optional[Any] = self.unet(__a , __a ).sample
lowerCamelCase : Any = self.scheduler.step_correct(__a , __a , generator=__a ).prev_sample
# prediction step
lowerCamelCase : Any = model(__a , __a ).sample
lowerCamelCase : int = self.scheduler.step_pred(__a , __a , __a , generator=__a )
lowerCamelCase , lowerCamelCase : List[str] = output.prev_sample, output.prev_sample_mean
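# The mean of the final prediction step is used as the output sample: it is
# the denoised estimate without the last injected noise.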
lowerCamelCase : Tuple = sample_mean.clamp(0 , 1 )
lowerCamelCase : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase : List[Any] = self.numpy_to_pil(__a )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__a )
"""simple docstring"""
from __future__ import annotations
import math
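# ucal computes the falling-factorial term u(u-1)(u-2)...(u-n+1) that
# multiplies the n-th forward difference in Newton's interpolation formula.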
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : int ) -> float:
lowerCamelCase : Dict = u
for i in range(1 , UpperCamelCase__ ):
lowerCamelCase : List[str] = temp * (u - i)
return temp
def snake_case ( ) -> None:
lowerCamelCase : List[Any] = int(input("""enter the number of values: """ ) )
lowerCamelCase : list[list[float]] = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = 0
print("""enter the values of parameters in a list: """ )
lowerCamelCase : Any = list(map(UpperCamelCase__ , input().split() ) )
print("""enter the values of corresponding parameters: """ )
for i in range(UpperCamelCase__ ):
lowerCamelCase : int = float(input() )
lowerCamelCase : Dict = int(input("""enter the value to interpolate: """ ) )
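# u = (x - x0) / h, the normalised offset into the difference table
# (this assumes equally spaced x values).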
lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
lowerCamelCase : str = y[j + 1][i - 1] - y[j][i - 1]
lowerCamelCase : Any = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
__lowerCamelCase :Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
__lowerCamelCase :Any = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =CamembertTokenizer
snake_case__ : Any =CamembertTokenizerFast
snake_case__ : Dict =True
snake_case__ : Union[str, Any] =True
def a__ ( self: Dict )-> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : List[Any] = CamembertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Any )-> str:
lowerCamelCase : List[str] = """<pad>"""
lowerCamelCase : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: int )-> Any:
lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__a ) , 1_004 )
def a__ ( self: Any )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
def a__ ( self: Dict )-> List[str]:
lowerCamelCase : Any = CamembertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
lowerCamelCase : List[str] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
lowerCamelCase : List[Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : int = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : int = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : str = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
def a__ ( self: List[str] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : Dict = self.get_rust_tokenizer()
lowerCamelCase : List[Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : Optional[int] = tokenizer.tokenize(__a )
lowerCamelCase : Optional[Any] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Tuple = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : str = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = self.get_rust_tokenizer()
lowerCamelCase : Tuple = tokenizer.encode(__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
@slow
def a__ ( self: Dict )-> Tuple:
# fmt: off
lowerCamelCase : Any = {"""input_ids""": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# CamemBERT is a French model, so we also use French texts.
lowerCamelCase : Optional[Any] = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__a , )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowerCamelCase :str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
__lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
lowerCamelCase : Dict = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Union[str, Any] = image_size
lowerCamelCase : Optional[int] = patch_size
lowerCamelCase : Any = num_channels
lowerCamelCase : Any = embed_dim
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : List[Any] = depths
lowerCamelCase : Tuple = num_heads
lowerCamelCase : List[Any] = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : str = qkv_bias
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Tuple = drop_path_rate
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Tuple = use_absolute_embeddings
lowerCamelCase : List[str] = patch_norm
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : str = initializer_range
lowerCamelCase : Tuple = is_training
lowerCamelCase : int = scope
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : str = encoder_stride
lowerCamelCase : List[str] = out_features
lowerCamelCase : Optional[int] = out_indices
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def a__ ( self: List[Any] )-> Optional[int]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = model(__a )
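# Each of the len(depths) - 1 patch-merging stages divides the token count
# by 4 and doubles the channel dimension, hence the expected shapes below.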
lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase : Dict = None
lowerCamelCase : Dict = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
lowerCamelCase : Optional[Any] = self.type_sequence_label_size
lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self: int )-> Optional[int]:
lowerCamelCase : str = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Tuple =False
snake_case__ : Dict =False
snake_case__ : Dict =False
snake_case__ : Tuple =False
snake_case__ : Optional[int] =False
def a__ ( self: Union[str, Any] )-> Optional[int]:
lowerCamelCase : List[str] = FocalNetModelTester(self )
lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )
def a__ ( self: List[str] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: List[str] )-> Union[str, Any]:
return
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[Any] )-> Dict:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : int = model_class(__a )
lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict, so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[str] = outputs.hidden_states
lowerCamelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self: Any )-> Any:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : List[str] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : str = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def a__ ( self: Optional[int] )-> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def a__ ( self: str )-> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def a__ ( self: int )-> Optional[Any]:
        model = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
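        # ImageNet-1k class index 281 is "tabby, tabby cat", the expected top prediction
        # for the two-cats COCO test image used above.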
@require_torch
class FocalNetBackboneTest( BackboneTesterMixin , unittest.TestCase):
"""simple docstring"""
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
def a__ ( self: Union[str, Any] )-> Tuple:
        self.model_tester = FocalNetModelTester(self )
| 42
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def convert_weight_and_push( hidden_sizes: int , name: str , config: LevitConfig , save_directory: Path , push_to_hub: bool = True ):
    print(F'Converting {name}...' )
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("""levit_128s""" , pretrained=True )
            else:
                from_model = timm.create_model("""levit_128""" , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model("""levit_192""" , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model("""levit_256""" , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model("""levit_384""" , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 224, 224) )
        out1 = from_model(x )
        out2 = our_model(x ).logits
        assert torch.allclose(out1 , out2 ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push( save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
        """levit-128S""": 128,
        """levit-128""": 128,
        """levit-192""": 192,
        """levit-256""": 256,
        """levit-384""": 384,
    }
    names_to_config = {
        """levit-128S""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-128""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-192""": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-256""": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-384""": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
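# Example invocation (hypothetical script filename; paths are placeholders, not from this file):
#   python convert_levit_checkpoint.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub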
| 42
| 1
|
"""simple docstring"""
def solution( limit: int = 1000 ) -> int:
    """Return the sum of all multiples of 3 or 5 below `limit`."""
    return sum(e for e in range(3 , limit ) if e % 3 == 0 or e % 5 == 0 )
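# Sanity check: with the default limit of 1000 this is Project Euler problem 1, whose answer is 233168.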
if __name__ == "__main__":
print(f"""{solution() = }""")
| 42
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest( SchedulerCommonTest):
"""simple docstring"""
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        config = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
def a__ ( self: Union[str, Any] )-> Any:
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def a__ ( self: str )-> int:
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def a__ ( self: int )-> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def a__ ( self: List[Any] )-> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def a__ ( self: Union[str, Any] )-> int:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07 ) < 1e-2
            assert abs(result_mean.item() - 0.0002 ) < 1e-3
def a__ ( self: Any )-> Any:
if torch_device == "mps":
return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
def a__ ( self: Optional[Any] )-> List[Any]:
if torch_device == "mps":
return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith("""cpu""" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
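# The three loops above follow the standard sampling pattern used across diffusers
# scheduler tests: scale the sample with scheduler.scale_model_input, predict with the
# model, then advance via scheduler.step(...).prev_sample.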
| 42
| 1
|
"""simple docstring"""
def snake_case ( bin_string: str ) -> str:
    if not all(char in """01""" for char in bin_string ):
        raise ValueError("""Non-binary value was passed to the function""" )
    if not bin_string:
        raise ValueError("""Empty string was passed to the function""" )
    oct_string = """"""
    while len(bin_string ) % 3 != 0:
        bin_string = """0""" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
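# Example: snake_case("1111") returns "17" (binary 1111 is decimal 15, i.e. octal 17).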
if __name__ == "__main__":
from doctest import testmod
testmod()
| 42
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase : Any = EulerDiscreteScheduler(
            beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=32 , )
lowerCamelCase : Dict = CLIPTextModel(__a )
lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 5.0,
            """output_type""": """numpy""",
            """strength""": 0.75,
        }
        return inputs
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ ( self: Optional[Any] )-> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self: List[str] )-> Optional[Any]:
pass
def a__ ( self: List[Any] )-> Union[str, Any]:
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["""this is a negative prompt"""]
        inputs["""negative_prompt"""] = negative_prompt
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["""this is a negative prompt"""]
        prompt = 3 * [inputs.pop("""prompt""" )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
def a__ ( self: Optional[int] )-> List[str]:
        pipe = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
        assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 42
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Tuple = logging.get_logger(__name__)
def get_mobilevit_config( mobilevit_name: str ) -> MobileViTConfig:
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith("""deeplabv3_""" ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = """pascal-voc-id2label.json"""
    else:
        config.num_labels = 1000
        filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key( name , base_model=False ):
for i in range(1 , 6 ):
if F'layer_{i}.' in name:
lowerCamelCase : Optional[int] = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.' )
if "conv_1." in name:
lowerCamelCase : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
lowerCamelCase : Tuple = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
lowerCamelCase : int = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
lowerCamelCase : List[str] = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
lowerCamelCase : Any = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
lowerCamelCase : Union[str, Any] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
lowerCamelCase : List[Any] = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
lowerCamelCase : Dict = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
lowerCamelCase : Dict = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
lowerCamelCase : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
lowerCamelCase : Union[str, Any] = name.replace(F'.{i}.{j}.' , F'.{i}.' )
if "expand_1x1" in name:
lowerCamelCase : Optional[int] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
lowerCamelCase : Tuple = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
lowerCamelCase : List[str] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F'.global_rep.{i}.weight' in name:
lowerCamelCase : str = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""" )
if F'.global_rep.{i}.bias' in name:
lowerCamelCase : Union[str, Any] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""" )
if ".global_rep." in name:
lowerCamelCase : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
lowerCamelCase : Optional[Any] = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase : Optional[int] = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
lowerCamelCase : Any = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
lowerCamelCase : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
lowerCamelCase : int = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
lowerCamelCase : Any = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
lowerCamelCase : str = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
lowerCamelCase : Tuple = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
lowerCamelCase : Dict = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase : Tuple = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
lowerCamelCase : Dict = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase : str = """mobilevit.""" + name
return name
def convert_state_dict( orig_state_dict , model , base_model=False ):
if base_model:
lowerCamelCase : str = """"""
else:
lowerCamelCase : Union[str, Any] = """mobilevit."""
for key in orig_state_dict.copy().keys():
lowerCamelCase : List[str] = orig_state_dict.pop(UpperCamelCase__ )
if key[:8] == "encoder.":
lowerCamelCase : Union[str, Any] = key[8:]
if "qkv" in key:
lowerCamelCase : Dict = key.split(""".""" )
lowerCamelCase : List[Any] = int(key_split[0][6:] ) - 1
lowerCamelCase : Dict = int(key_split[3] )
lowerCamelCase : str = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}' )
lowerCamelCase : int = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCamelCase : Dict = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
lowerCamelCase : Dict = val[:dim, :]
lowerCamelCase : List[Any] = val[dim : dim * 2, :]
lowerCamelCase : Optional[int] = val[-dim:, :]
else:
lowerCamelCase : Union[str, Any] = val[:dim]
lowerCamelCase : Any = val[dim : dim * 2]
lowerCamelCase : str = val[-dim:]
else:
lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )
    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_""" ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    inputs = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**inputs )
    logits = outputs.logits
    if mobilevit_name.startswith("""deeplabv3_""" ):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ] )
        else:
            raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1e-4 )
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241] )
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587] )
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653] )
        else:
            raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            """mobilevit_s""": """mobilevit-small""",
            """mobilevit_xs""": """mobilevit-x-small""",
            """mobilevit_xxs""": """mobilevit-xx-small""",
            """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
            """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
            """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
        }
        print("""Pushing to the hub...""" )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization="""apple""" )
        model.push_to_hub(model_name , organization="""apple""" )
if __name__ == "__main__":
__lowerCamelCase :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
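# Example invocation (hypothetical script filename; paths are placeholders, not from this file):
#   python convert_mobilevit_checkpoint.py --mobilevit_name mobilevit_s --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small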
| 42
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
"""simple docstring"""
    def forward( self , input_ids , token_type_ids , attention_mask ):
        return None
class FuncNonContiguousArgs:
"""simple docstring"""
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class OnnxExportTestCase( unittest.TestCase):
"""simple docstring"""
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self: Optional[Any] )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """tf""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: str )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """pt""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: Union[str, Any] )-> Dict:
from transformers import BertModel
lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__a ) )
vocab_file.flush()
lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
model.save_pretrained(__a )
self._test_export(__a , """pt""" , 12 , __a )
@require_tf
@slow
def a__ ( self: Optional[Any] )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
lowerCamelCase : Tuple = quantize(Path(__a ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def a__ ( self: Any )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
lowerCamelCase : Dict = quantize(__a )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath("""model.onnx""" )
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework , model , path , opset , tokenizer , **model_kwargs )
                return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def a__ ( self: Tuple )-> Dict:
from transformers import BertModel
lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """pt""" )
@require_tf
@require_tokenizers
@slow
def a__ ( self: Optional[Any] )-> List[Any]:
from transformers import TFBertModel
lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """tf""" )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
    def a__ ( self: List[Any] )-> int:
        input_names = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        tokens = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
        self.assertEqual(ordered_input_names[0] , """input_ids""" )
def a__ ( self: Tuple )-> Tuple:
        generated = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
        self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 42
| 1
|
"""simple docstring"""
import numpy as np
def snake_case ( vector: np.ndarray , alpha: float ) -> np.ndarray:
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
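# Example (ELU activation; values approximate): snake_case(np.array([2.3, 0.6, -2.0, -3.8]), 0.3)
# returns roughly array([2.3, 0.6, -0.2594, -0.2933]).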
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
    def test_sorted( self ):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def test_negative_max_weight( self ):
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )
    def test_negative_weight_value( self ):
        self.assertRaisesRegex(ValueError , """Weight can not be negative.""" )
    def test_negative_profit_value( self ):
        self.assertRaisesRegex(ValueError , """Profit can not be negative.""" )
    def test_null_max_weight( self ):
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )
    def test_unequal_list_length( self ):
        self.assertRaisesRegex(
            IndexError , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
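# Note on test_sorted above: every item has the same profit/weight ratio (5), and the capacity
# of 100 exceeds the total weight of 42, so the greedy algorithm takes all six items and
# returns the full profit sum of 210.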
| 42
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
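# The `_LazyModule` wiring above defers the heavy torch imports until an attribute is first
# accessed. A minimal sketch of the idea (an illustrative stand-in, not the real transformers
# class):
#
#   import importlib
#
#   class LazyModule:
#       def __init__(self, name, import_structure):
#           self._name = name
#           self._import_structure = import_structure  # {submodule: [exported names]}
#
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module(f".{submodule}", self._name)
#                   return getattr(module, attr)
#           raise AttributeError(attr)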
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
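# In the upstream transformers repo a mapping like this lives in dependency_versions_table.py
# and is consumed by setup.py through a small helper (reconstructed from memory of the upstream
# repo, not from this file):
#   def deps_list(*pkgs):
#       return [deps[pkg] for pkg in pkgs]
# so deps_list("torch", "numpy") resolves to ["torch>=1.9,!=1.12.0", "numpy>=1.17"].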
| 42
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = FocalNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = FocalNetForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def a__ ( self: Union[str, Any] )-> Optional[int]:
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def a__ ( self: List[str] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
return
def a__ ( self: Tuple )-> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def a__ ( self: List[Any] )-> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
def a__ ( self: List[Any] )-> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def a__ ( self: List[str] )-> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def a__ ( self: Optional[Any] )-> Dict:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def a__ ( self: Tuple )-> Optional[int]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[str] = outputs.hidden_states
lowerCamelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
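# after patch embedding, the sequence length equals the number of patches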
lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self: Any )-> Any:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : List[str] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
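# pad each spatial dimension up to a multiple of the patch size (a full extra patch is added when the dimension is already divisible)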
lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : str = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def a__ ( self: Optional[int] )-> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> Any:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = _config_zero_init(__a )
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Optional[int] )-> Optional[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a )
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
snake_case__ : Optional[int] =FocalNetConfig
snake_case__ : str =False
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : str = FocalNetModelTester(self )
| 42
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__lowerCamelCase :Optional[Any] = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
|
"""simple docstring"""
import os
def snake_case ( ) -> Optional[Any]:
with open(os.path.dirname(UpperCamelCase__ ) + """/grid.txt""" ) as f:
lowerCamelCase : int = [] # noqa: E741
for _ in range(20 ):
l.append([int(UpperCamelCase__ ) for x in f.readline().split()] )
lowerCamelCase : Union[str, Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase : Optional[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase : List[str] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase : List[Any] = temp
return maximum
if __name__ == "__main__":
print(solution())
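# Assuming grid.txt holds the standard 20x20 Project Euler problem 11 grid,
# this prints 70600674, the greatest product of four adjacent numbers in any
# direction (right, down, or either diagonal).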
| 42
| 1
|
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__lowerCamelCase :str = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__lowerCamelCase :int = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
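# the helper below reads a single big-endian 32-bit unsigned integer from the byte stream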
def snake_case ( UpperCamelCase__ : int ) -> Any:
lowerCamelCase : Dict = numpy.dtype(numpy.uintaa ).newbyteorder(""">""" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=UpperCamelCase__ )[0]
@deprecated(UpperCamelCase__ , """Please use tf.data to implement this functionality.""" )
def snake_case ( UpperCamelCase__ : List[str] ) -> Any:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=UpperCamelCase__ ) as bytestream:
lowerCamelCase : Optional[Any] = _readaa(UpperCamelCase__ )
if magic != 2051:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
lowerCamelCase : List[Any] = _readaa(UpperCamelCase__ )
lowerCamelCase : int = _readaa(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = _readaa(UpperCamelCase__ )
lowerCamelCase : Dict = bytestream.read(rows * cols * num_images )
lowerCamelCase : Dict = numpy.frombuffer(UpperCamelCase__ , dtype=numpy.uinta )
lowerCamelCase : Optional[int] = data.reshape(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 1 )
return data
@deprecated(UpperCamelCase__ , """Please use tf.one_hot on tensors.""" )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
lowerCamelCase : List[Any] = labels_dense.shape[0]
lowerCamelCase : Union[str, Any] = numpy.arange(UpperCamelCase__ ) * num_classes
lowerCamelCase : List[Any] = numpy.zeros((num_labels, num_classes) )
lowerCamelCase : Tuple = 1
return labels_one_hot
@deprecated(UpperCamelCase__ , """Please use tf.data to implement this functionality.""" )
def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=False , UpperCamelCase__ : List[str]=10 ) -> str:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=UpperCamelCase__ ) as bytestream:
lowerCamelCase : Dict = _readaa(UpperCamelCase__ )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
lowerCamelCase : str = _readaa(UpperCamelCase__ )
lowerCamelCase : Any = bytestream.read(UpperCamelCase__ )
lowerCamelCase : Tuple = numpy.frombuffer(UpperCamelCase__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(UpperCamelCase__ , UpperCamelCase__ )
return labels
class A__ :
"""simple docstring"""
@deprecated(
__a , """Please use alternatives such as official/mnist/_DataSet.py"""
""" from tensorflow/models.""" , )
def __init__( self: Tuple , __a: Tuple , __a: Dict , __a: Tuple=False , __a: int=False , __a: Optional[int]=dtypes.floataa , __a: Tuple=True , __a: int=None , )-> Any:
lowerCamelCase , lowerCamelCase : Dict = random_seed.get_seed(__a )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowerCamelCase : str = dtypes.as_dtype(__a ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
if fake_data:
lowerCamelCase : Optional[Any] = 10_000
lowerCamelCase : int = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'images.shape: {images.shape} labels.shape: {labels.shape}'
lowerCamelCase : Union[str, Any] = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowerCamelCase : List[str] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowerCamelCase : Optional[Any] = images.astype(numpy.floataa )
lowerCamelCase : List[str] = numpy.multiply(__a , 1.0 / 2_55.0 )
lowerCamelCase : Any = images
lowerCamelCase : List[str] = labels
lowerCamelCase : Dict = 0
lowerCamelCase : Union[str, Any] = 0
@property
def a__ ( self: Tuple )-> List[Any]:
return self._images
@property
def a__ ( self: Optional[int] )-> int:
return self._labels
@property
def a__ ( self: List[Any] )-> Optional[Any]:
return self._num_examples
@property
def a__ ( self: Optional[int] )-> Dict:
return self._epochs_completed
def a__ ( self: Any , __a: str , __a: Optional[int]=False , __a: str=True )-> Optional[int]:
if fake_data:
lowerCamelCase : List[str] = [1] * 784
lowerCamelCase : str = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__a )],
[fake_label for _ in range(__a )],
)
lowerCamelCase : Dict = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCamelCase : Union[str, Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(__a )
lowerCamelCase : Any = self.images[perma]
lowerCamelCase : Optional[int] = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowerCamelCase : Union[str, Any] = self._num_examples - start
lowerCamelCase : Tuple = self._images[start : self._num_examples]
lowerCamelCase : int = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowerCamelCase : Optional[int] = numpy.arange(self._num_examples )
numpy.random.shuffle(__a )
lowerCamelCase : Dict = self.images[perm]
lowerCamelCase : Optional[int] = self.labels[perm]
# Start next epoch
lowerCamelCase : str = 0
lowerCamelCase : Union[str, Any] = batch_size - rest_num_examples
lowerCamelCase : str = self._index_in_epoch
lowerCamelCase : List[Any] = self._images[start:end]
lowerCamelCase : Union[str, Any] = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowerCamelCase : List[Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(UpperCamelCase__ , """Please write your own downloading logic.""" )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> Any:
if not gfile.Exists(UpperCamelCase__ ):
gfile.MakeDirs(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
if not gfile.Exists(UpperCamelCase__ ):
urllib.request.urlretrieve(UpperCamelCase__ , UpperCamelCase__ ) # noqa: S310
with gfile.GFile(UpperCamelCase__ ) as f:
lowerCamelCase : str = f.size()
print("""Successfully downloaded""" , UpperCamelCase__ , UpperCamelCase__ , """bytes.""" )
return filepath
@deprecated(
UpperCamelCase__ , """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : str=dtypes.floataa , UpperCamelCase__ : str=True , UpperCamelCase__ : str=5000 , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : str=DEFAULT_SOURCE_URL , ) -> Tuple:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=UpperCamelCase__ , one_hot=UpperCamelCase__ , dtype=UpperCamelCase__ , seed=UpperCamelCase__ )
lowerCamelCase : Tuple = fake()
lowerCamelCase : List[str] = fake()
lowerCamelCase : str = fake()
return _Datasets(train=UpperCamelCase__ , validation=UpperCamelCase__ , test=UpperCamelCase__ )
if not source_url: # empty string check
lowerCamelCase : List[str] = DEFAULT_SOURCE_URL
lowerCamelCase : Any = """train-images-idx3-ubyte.gz"""
lowerCamelCase : Optional[int] = """train-labels-idx1-ubyte.gz"""
lowerCamelCase : Optional[Any] = """t10k-images-idx3-ubyte.gz"""
lowerCamelCase : Any = """t10k-labels-idx1-ubyte.gz"""
lowerCamelCase : Any = _maybe_download(
UpperCamelCase__ , UpperCamelCase__ , source_url + train_images_file )
with gfile.Open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : List[Any] = _extract_images(UpperCamelCase__ )
lowerCamelCase : Tuple = _maybe_download(
UpperCamelCase__ , UpperCamelCase__ , source_url + train_labels_file )
with gfile.Open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : int = _extract_labels(UpperCamelCase__ , one_hot=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = _maybe_download(
UpperCamelCase__ , UpperCamelCase__ , source_url + test_images_file )
with gfile.Open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : Optional[int] = _extract_images(UpperCamelCase__ )
lowerCamelCase : List[Any] = _maybe_download(
UpperCamelCase__ , UpperCamelCase__ , source_url + test_labels_file )
with gfile.Open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : Dict = _extract_labels(UpperCamelCase__ , one_hot=UpperCamelCase__ )
if not 0 <= validation_size <= len(UpperCamelCase__ ):
lowerCamelCase : Tuple = (
"""Validation size should be between 0 and """
F'{len(UpperCamelCase__ )}. Received: {validation_size}.'
)
raise ValueError(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = train_images[:validation_size]
lowerCamelCase : Tuple = train_labels[:validation_size]
lowerCamelCase : Optional[int] = train_images[validation_size:]
lowerCamelCase : Optional[int] = train_labels[validation_size:]
lowerCamelCase : Union[str, Any] = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
lowerCamelCase : Optional[int] = _DataSet(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : List[Any] = _DataSet(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = _DataSet(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
return _Datasets(train=UpperCamelCase__ , validation=UpperCamelCase__ , test=UpperCamelCase__ )
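# A minimal usage sketch, assuming this function's public name is
# read_data_sets (the directory path below is hypothetical):
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = mnist.train.next_batch(100)  # images: shape (100, 784)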
| 42
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
snake_case__ : Any =False
snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ ( cls: Dict )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Union[str, Any] )-> Any:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: Tuple )-> Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCamelCase : Optional[int] = CLIPTextModel(__a )
lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Tuple = torch.manual_seed(__a )
else:
lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Dict = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def a__ ( self: Dict )-> str:
lowerCamelCase : Tuple = """cpu"""
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : List[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Any = self.get_dummy_inputs(__a )
lowerCamelCase : Union[str, Any] = pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase : Optional[Any] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def a__ ( self: int )-> Optional[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def a__ ( self: Union[str, Any] )-> Optional[int]:
# NOTE: Larger batch sizes cause this test to time out, so only test smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self: Tuple )-> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def a__ ( self: Dict )-> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def a__ ( self: Optional[int] )-> Dict:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def a__ ( self: Any )-> Tuple:
super().test_save_load_local(expected_max_difference=5e-4 )
def a__ ( self: str )-> str:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
@classmethod
def a__ ( cls: Any )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Dict )-> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: int )-> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = torch.manual_seed(51 )
lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowerCamelCase : Dict = """a painting of an elephant with glasses"""
lowerCamelCase : Any = [5, 7]
lowerCamelCase : Tuple = pipe(
prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowerCamelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 42
| 1
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCamelCase :Any = ''
__lowerCamelCase :Union[str, Any] = ''
__lowerCamelCase :Optional[Any] = ''
__lowerCamelCase :Optional[Any] = 1 # (0 is vertical, 1 is horizontal)
def snake_case ( ) -> None:
lowerCamelCase , lowerCamelCase : Any = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
print("""Processing...""" )
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for index, image in enumerate(UpperCamelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCamelCase : Optional[Any] = random_chars(32 )
lowerCamelCase : str = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
lowerCamelCase : Any = F'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
cva.imwrite(F'/{file_root}.jpg' , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'Success {index+1}/{len(UpperCamelCase__ )} with {file_name}' )
lowerCamelCase : Dict = []
for anno in new_annos[index]:
lowerCamelCase : List[str] = F'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
annos_list.append(UpperCamelCase__ )
with open(F'/{file_root}.txt' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> tuple[list, list]:
lowerCamelCase : int = []
lowerCamelCase : List[Any] = []
for label_file in glob.glob(os.path.join(UpperCamelCase__ , """*.txt""" ) ):
lowerCamelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(UpperCamelCase__ ) as in_file:
lowerCamelCase : List[Any] = in_file.readlines()
lowerCamelCase : Any = os.path.join(UpperCamelCase__ , F'{label_name}.jpg' )
lowerCamelCase : Any = []
for obj_list in obj_lists:
lowerCamelCase : List[Any] = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase__ )
labels.append(UpperCamelCase__ )
return img_paths, labels
def snake_case ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ) -> tuple[list, list, list]:
lowerCamelCase : Optional[int] = []
lowerCamelCase : Any = []
lowerCamelCase : int = []
for idx in range(len(UpperCamelCase__ ) ):
lowerCamelCase : str = []
lowerCamelCase : int = img_list[idx]
path_list.append(UpperCamelCase__ )
lowerCamelCase : List[str] = anno_list[idx]
lowerCamelCase : str = cva.imread(UpperCamelCase__ )
if flip_type == 1:
lowerCamelCase : Dict = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
lowerCamelCase : Dict = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
lowerCamelCase : str = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
lowerCamelCase : str = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase__ )
new_imgs_list.append(UpperCamelCase__ )
return new_imgs_list, new_annos_lists, path_list
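# The annotations are YOLO-style (class, x_center, y_center, width, height),
# all normalized to [0, 1], so a flip only mirrors the relevant center
# coordinate: x_center -> 1 - x_center horizontally, y_center -> 1 - y_center
# vertically.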
def snake_case ( UpperCamelCase__ : int = 32 ) -> str:
assert number_char > 1, "The number of characters should be greater than 1"
lowerCamelCase : int = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 42
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
"""simple docstring"""
def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : Any = seq_length
lowerCamelCase : Any = is_training
lowerCamelCase : Tuple = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Union[str, Any] = hidden_dropout_prob
lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : List[Any] = type_sequence_label_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Union[str, Any] = num_labels
lowerCamelCase : Optional[Any] = num_choices
lowerCamelCase : Any = scope
def a__ ( self: Optional[int] )-> List[Any]:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Dict = None
if self.use_input_mask:
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Any = None
lowerCamelCase : int = None
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Tuple )-> Union[str, Any]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
lowerCamelCase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a )
lowerCamelCase : str = model(__a )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
lowerCamelCase : str = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Any = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =False
snake_case__ : Dict =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =()
snake_case__ : Optional[int] =(
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any =True
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Optional[Any] = EsmModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: List[Any] )-> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Any )-> List[Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> List[str]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Any = EsmEmbeddings(config=__a )
lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
lowerCamelCase : List[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Any )-> Optional[Any]:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Dict )-> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: List[str] )-> Dict:
pass
@require_torch
class A__ ( __lowercase):
"""simple docstring"""
@slow
def a__ ( self: Any )-> Union[str, Any]:
with torch.no_grad():
lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Tuple = model(__a )[0]
lowerCamelCase : Dict = 33
lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
lowerCamelCase : Tuple = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def a__ ( self: Dict )-> str:
with torch.no_grad():
lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase : Any = model(__a )[0]
# compare the actual values for a slice.
lowerCamelCase : Tuple = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
| 42
| 1
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create the universe of discourse using np.linspace()
__lowerCamelCase :Optional[int] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__lowerCamelCase :Optional[int] = [0, 25, 50]
__lowerCamelCase :int = [25, 50, 75]
__lowerCamelCase :Union[str, Any] = fuzz.membership.trimf(X, abca)
__lowerCamelCase :Dict = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__lowerCamelCase :Optional[Any] = np.ones(75)
__lowerCamelCase :Tuple = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__lowerCamelCase :List[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__lowerCamelCase :Union[str, Any] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = 1 - µA(x)
__lowerCamelCase :Tuple = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__lowerCamelCase :List[str] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
__lowerCamelCase :Any = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__lowerCamelCase :List[str] = young * middle_aged
# 7. Bounded Sum = min[1, µA(x) + µB(x)]
__lowerCamelCase :Optional[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, µA(x) - µB(x)]
__lowerCamelCase :List[Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
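# Neither composition is implemented above; a minimal sketch of max-min
# composition of two fuzzy relations (the function name and shapes are
# assumptions: r is (m, n), s is (n, p)):
def max_min_composition(r, s):
    # T[i][k] = max over j of min(r[i][j], s[j][k])
    return np.max(np.minimum(r[:, :, None], s[None, :, :]), axis=1)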
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 42
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AlbertTokenizer
snake_case__ : Optional[Any] =AlbertTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : Any =True
snake_case__ : Optional[int] =True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Tuple )-> List[Any]:
lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
lowerCamelCase : str = AlbertTokenizer(__a )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 42
| 1
|
"""simple docstring"""
from math import factorial
class A__ :
"""simple docstring"""
def __init__( self: Dict , __a: Tuple , __a: Optional[Any] )-> str:
lowerCamelCase : Optional[Any] = real
if isinstance(__a , __a ):
lowerCamelCase : Tuple = [1] * rank
else:
lowerCamelCase : Any = rank
def __repr__( self: int )-> Any:
return (
f'{self.real}+'
f'{"+".join(str(__a )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , __a )
def __add__( self: Optional[Any] , __a: int )-> Any:
if not isinstance(__a , __a ):
return Dual(self.real + other , self.duals )
lowerCamelCase : Optional[Any] = self.duals.copy()
lowerCamelCase : Any = other.duals.copy()
if len(__a ) > len(__a ):
o_dual.extend([1] * (len(__a ) - len(__a )) )
elif len(__a ) < len(__a ):
s_dual.extend([1] * (len(__a ) - len(__a )) )
lowerCamelCase : Optional[Any] = []
for i in range(len(__a ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , __a )
snake_case__ : List[str] =__add__
def __sub__( self: str , __a: List[str] )-> Any:
return self + other * -1
def __mul__( self: Optional[int] , __a: List[Any] )-> List[Any]:
if not isinstance(__a , __a ):
lowerCamelCase : Tuple = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , __a )
lowerCamelCase : Any = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , __a )
snake_case__ : str =__mul__
def __truediv__( self: Optional[Any] , __a: Any )-> List[Any]:
if not isinstance(__a , __a ):
lowerCamelCase : List[str] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , __a )
raise ValueError
def __floordiv__( self: List[str] , __a: Optional[Any] )-> Tuple:
if not isinstance(__a , __a ):
lowerCamelCase : List[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , __a )
raise ValueError
def __pow__( self: List[str] , __a: Union[str, Any] )-> List[Any]:
if n < 0 or isinstance(__a , __a ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
lowerCamelCase : int = self
for _ in range(n - 1 ):
x *= self
return x
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ) -> Optional[int]:
if not callable(UpperCamelCase__ ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(UpperCamelCase__ , (float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError("""differentiate() requires an int as input for order""" )
lowerCamelCase : Union[str, Any] = Dual(UpperCamelCase__ , 1 )
lowerCamelCase : int = func(UpperCamelCase__ )
if order == 0:
return result.real
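# the n-th dual coefficient stores f^(n)(x) / n!, so multiplying by
# factorial(order) recovers the n-th derivative itself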
return result.duals[order - 1] * factorial(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def snake_case ( UpperCamelCase__ : Any ) -> Tuple:
return y**2 * y**4
print(differentiate(f, 9, 2))
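# Worked check: f(y) = y**2 * y**4 = y**6, so f''(y) = 30 * y**4 and
# f''(9) = 30 * 9**4 = 196830, the value this example is expected to print.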
| 42
|
"""simple docstring"""
__lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : Tuple = True
lowerCamelCase : Any = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
order.append(UpperCamelCase__ )
return order
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : List[Any] = True
lowerCamelCase : int = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return component
def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]:
lowerCamelCase : int = len(UpperCamelCase__ ) * [False]
lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(UpperCamelCase__ )
lowerCamelCase : int = []
for i, was_visited in enumerate(UpperCamelCase__ ):
if not was_visited:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Tuple = []
lowerCamelCase : str = len(UpperCamelCase__ ) * [False]
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1]
if not visited[vert]:
lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
components_list.append(UpperCamelCase__ )
return components_list
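# Sanity check: in the first example graph the only cycle is 0 -> 2 -> 1 -> 0,
# so its strongly connected components are {0, 1, 2}, {3} and {4}; the second
# graph decomposes into {0, 1, 2} and {3, 4, 5}.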
| 42
| 1
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
"""simple docstring"""
def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : Any = seq_length
lowerCamelCase : Any = is_training
lowerCamelCase : Tuple = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Union[str, Any] = hidden_dropout_prob
lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : List[Any] = type_sequence_label_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Union[str, Any] = num_labels
lowerCamelCase : Optional[Any] = num_choices
lowerCamelCase : Any = scope
def a__ ( self: Optional[int] )-> List[Any]:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Dict = None
if self.use_input_mask:
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Any = None
lowerCamelCase : int = None
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Tuple )-> Union[str, Any]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
lowerCamelCase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a )
lowerCamelCase : str = model(__a )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
lowerCamelCase : str = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Any = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =False
snake_case__ : Dict =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =()
snake_case__ : Optional[int] =(
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any =True
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Optional[Any] = EsmModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: List[Any] )-> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Any )-> List[Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> List[str]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
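# Expected scheme (described, not changed): non-pad tokens receive consecutive
# positions starting at padding_idx + 1, while pad positions keep padding_idx itself.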
lowerCamelCase : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Any = EsmEmbeddings(config=__a )
lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
lowerCamelCase : List[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Any )-> Optional[Any]:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Dict )-> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: List[str] )-> Dict:
pass
@require_torch
class A__ ( __lowercase):
"""simple docstring"""
@slow
def a__ ( self: Any )-> Union[str, Any]:
with torch.no_grad():
lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Tuple = model(__a )[0]
lowerCamelCase : Dict = 33
lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
lowerCamelCase : Tuple = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def a__ ( self: Dict )-> str:
with torch.no_grad():
lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase : Any = model(__a )[0]
# compare the actual values for a slice.
lowerCamelCase : Tuple = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
| 42
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] ='''time_series_transformer'''
snake_case__ : List[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
# time series specific configuration
lowerCamelCase : str = prediction_length
lowerCamelCase : Optional[Any] = context_length or prediction_length
lowerCamelCase : Tuple = distribution_output
lowerCamelCase : Any = loss
lowerCamelCase : List[Any] = input_size
lowerCamelCase : int = num_time_features
lowerCamelCase : Dict = lags_sequence
lowerCamelCase : Optional[int] = scaling
lowerCamelCase : int = num_dynamic_real_features
lowerCamelCase : Tuple = num_static_real_features
lowerCamelCase : Any = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : int = cardinality
else:
lowerCamelCase : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = embedding_dimension
else:
lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
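# Default heuristic example: a categorical feature with cardinality 7 gets an
# embedding of size min(50, (7 + 1) // 2) = 4; very large cardinalities cap at 50.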
lowerCamelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features
lowerCamelCase : List[str] = d_model
lowerCamelCase : Tuple = encoder_attention_heads
lowerCamelCase : Optional[int] = decoder_attention_heads
lowerCamelCase : Union[str, Any] = encoder_ffn_dim
lowerCamelCase : str = decoder_ffn_dim
lowerCamelCase : str = encoder_layers
lowerCamelCase : Any = decoder_layers
lowerCamelCase : Optional[int] = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Tuple = activation_dropout
lowerCamelCase : Optional[int] = encoder_layerdrop
lowerCamelCase : int = decoder_layerdrop
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : Optional[Any] = init_std
lowerCamelCase : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=__a , **__a )
@property
def a__ ( self: int )-> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
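# Worked example (illustrative values, not the defaults): with input_size=1,
# a lags_sequence of length 7, one static categorical feature embedded at size 2,
# num_dynamic_real_features=0, num_time_features=1, num_static_real_features=0:
#   _number_of_features = 2 + 0 + 1 + 0 + 1 * 2 = 5
#   feature_size (the d_model input) = 1 * 7 + 5 = 12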
| 42
| 1
|
"""simple docstring"""
from __future__ import annotations
from math import gcd
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : int = 2 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : int = 3 , ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values in the range ``0 <= X < num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> int:
return (pow(UpperCamelCase__ , 2 ) + step) % modulus
for _ in range(UpperCamelCase__ ):
# These track the position within the cycle detection logic.
lowerCamelCase : List[Any] = seed
lowerCamelCase : Dict = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCamelCase : Tuple = rand_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Any = rand_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : int = rand_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCamelCase : List[Any] = gcd(hare - tortoise , UpperCamelCase__ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's "optimized" variant does.
lowerCamelCase : Optional[int] = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
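# Usage sketch (illustrative; the exact factor returned depends on the seed/step):
#   pollard_rho(10)   returns 2 immediately via the even-input shortcut
#   pollard_rho(13)   returns None after the default 3 attempts (13 is prime)
#   pollard_rho(8051) should return a nontrivial factor, 83 or 97 (8051 = 83 * 97)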
if __name__ == "__main__":
import argparse
__lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
__lowerCamelCase :Tuple = parser.parse_args()
__lowerCamelCase :Dict = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
__lowerCamelCase :Any = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 42
|
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase :int = 10
def snake_case ( UpperCamelCase__ : list[int] ) -> list[int]:
lowerCamelCase : int = 1
lowerCamelCase : Union[str, Any] = max(UpperCamelCase__ )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase : list[list] = [[] for _ in range(UpperCamelCase__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase : Any = int((i // placement) % RADIX )  # floor division keeps the digit extraction exact for large ints
buckets[tmp].append(UpperCamelCase__ )
# put each bucket's contents back into list_of_ints
lowerCamelCase : Dict = 0
for b in range(UpperCamelCase__ ):
for i in buckets[b]:
lowerCamelCase : List[str] = i
a += 1
# move to the next digit place
placement *= RADIX
return list_of_ints
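# Worked example (three passes, since max([170, 45, 75, 90]) has three digits):
#   [170, 45, 75, 90] -> by ones digit:     [170, 90, 45, 75]
#                     -> by tens digit:     [45, 170, 75, 90]
#                     -> by hundreds digit: [45, 75, 90, 170]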
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__lowerCamelCase :List[Any] = logging.get_logger('transformers.models.encodec')
__lowerCamelCase :Optional[Any] = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
__lowerCamelCase :Any = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
__lowerCamelCase :int = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
__lowerCamelCase :List[str] = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
__lowerCamelCase :Any = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
__lowerCamelCase :Optional[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__lowerCamelCase :str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__lowerCamelCase :Optional[int] = []
__lowerCamelCase :str = []
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : str ) -> Tuple:
for attribute in key.split(""".""" ):
lowerCamelCase : Tuple = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
lowerCamelCase : str = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
lowerCamelCase : List[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
lowerCamelCase : Dict = value
elif weight_type == "weight_g":
lowerCamelCase : str = value
elif weight_type == "weight_v":
lowerCamelCase : Any = value
elif weight_type == "bias":
lowerCamelCase : Union[str, Any] = value
elif weight_type == "running_mean":
lowerCamelCase : int = value
elif weight_type == "running_var":
lowerCamelCase : int = value
elif weight_type == "num_batches_tracked":
lowerCamelCase : List[Any] = value
elif weight_type == "weight_ih_l0":
lowerCamelCase : Tuple = value
elif weight_type == "weight_hh_l0":
lowerCamelCase : Optional[int] = value
elif weight_type == "bias_ih_l0":
lowerCamelCase : Any = value
elif weight_type == "bias_hh_l0":
lowerCamelCase : Tuple = value
elif weight_type == "weight_ih_l1":
lowerCamelCase : int = value
elif weight_type == "weight_hh_l1":
lowerCamelCase : Optional[int] = value
elif weight_type == "bias_ih_l1":
lowerCamelCase : Optional[int] = value
elif weight_type == "bias_hh_l1":
lowerCamelCase : List[Any] = value
else:
lowerCamelCase : Tuple = value
logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ) -> Optional[int]:
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCamelCase , lowerCamelCase : List[str] = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
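# Pattern semantics (illustrative keys, not taken from the real ignore lists):
#   "encoder.*"    ignores any name starting with "encoder."
#   "dec.*.lstm"   ignores names containing both the "dec" prefix and "lstm" suffix
#   "final_proj"   ignores any name containing "final_proj" as a substring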
def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ) -> List[Any]:
lowerCamelCase : Dict = []
if model_name == "encodec_24khz" or "encodec_32khz":
lowerCamelCase : int = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCamelCase : Union[str, Any] = MAPPING_48K
else:
raise ValueError(F'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(UpperCamelCase__ , UpperCamelCase__ ):
logger.info(F'{name} was ignored' )
continue
lowerCamelCase : Union[str, Any] = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCamelCase , lowerCamelCase : str = key.split(""".*.""" )
if prefix in name and suffix in name:
lowerCamelCase : List[str] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
lowerCamelCase : List[str] = True
if "*" in mapped_key:
lowerCamelCase : str = name.split(UpperCamelCase__ )[0].split(""".""" )[-2]
lowerCamelCase : Union[str, Any] = mapped_key.replace("""*""" , UpperCamelCase__ )
if "weight_g" in name:
lowerCamelCase : Dict = """weight_g"""
elif "weight_v" in name:
lowerCamelCase : Tuple = """weight_v"""
elif "weight_ih_l0" in name:
lowerCamelCase : Union[str, Any] = """weight_ih_l0"""
elif "weight_hh_l0" in name:
lowerCamelCase : Tuple = """weight_hh_l0"""
elif "bias_ih_l0" in name:
lowerCamelCase : List[Any] = """bias_ih_l0"""
elif "bias_hh_l0" in name:
lowerCamelCase : str = """bias_hh_l0"""
elif "weight_ih_l1" in name:
lowerCamelCase : Any = """weight_ih_l1"""
elif "weight_hh_l1" in name:
lowerCamelCase : List[str] = """weight_hh_l1"""
elif "bias_ih_l1" in name:
lowerCamelCase : Dict = """bias_ih_l1"""
elif "bias_hh_l1" in name:
lowerCamelCase : Dict = """bias_hh_l1"""
elif "bias" in name:
lowerCamelCase : Optional[int] = """bias"""
elif "weight" in name:
lowerCamelCase : str = """weight"""
elif "running_mean" in name:
lowerCamelCase : List[str] = """running_mean"""
elif "running_var" in name:
lowerCamelCase : Any = """running_var"""
elif "num_batches_tracked" in name:
lowerCamelCase : Tuple = """num_batches_tracked"""
else:
lowerCamelCase : List[str] = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F'Unused weights: {unused_weights}' )
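# Worked example of the wildcard mapping (key taken from MAPPING_QUANTIZER above):
#   original name : "quantizer.vq.layers.3._codebook.embed"
#   pattern       : "quantizer.vq.layers.*._codebook.embed"
# The suffix "_codebook.embed" matches, the layer index "3" is recovered from the
# name, and "*" in the mapped key is substituted to give
#   "quantizer.layers.3.codebook.embed"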
@torch.no_grad()
def snake_case ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , ) -> Optional[int]:
if config_path is not None:
lowerCamelCase : Any = EncodecConfig.from_pretrained(UpperCamelCase__ )
else:
lowerCamelCase : List[str] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCamelCase : Optional[int] = [8, 5, 4, 4]
lowerCamelCase : List[Any] = [2.2]
lowerCamelCase : List[str] = 64
lowerCamelCase : Optional[Any] = 32000
lowerCamelCase : List[str] = 2048
lowerCamelCase : Dict = False
lowerCamelCase : List[Any] = False
lowerCamelCase : Any = False
elif model_name == "encodec_48khz":
lowerCamelCase : Dict = [8, 5, 4, 2]
lowerCamelCase : Optional[Any] = [3.0, 6.0, 1_2.0, 2_4.0]
lowerCamelCase : Optional[int] = 48000
lowerCamelCase : str = 2
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = """time_group_norm"""
lowerCamelCase : Optional[Any] = True
lowerCamelCase : Dict = 1.0
lowerCamelCase : str = 0.0_1
else:
raise ValueError(F'Unknown model name: {model_name}' )
lowerCamelCase : Optional[Any] = EncodecModel(UpperCamelCase__ )
lowerCamelCase : Optional[int] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(UpperCamelCase__ )
lowerCamelCase : Any = torch.load(UpperCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCamelCase : Optional[Any] = original_checkpoint["""best_state"""]
recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(UpperCamelCase__ )
model.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__lowerCamelCase :Optional[int] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 42
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Dict = np.asarray(weights[0] )
lowerCamelCase : List[Any] = np.asarray(weights[1] )
lowerCamelCase : List[str] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Tuple = np.asarray(weights[0] )
lowerCamelCase : Any = np.asarray(weights[1] )
lowerCamelCase : List[Any] = np.asarray(weights[2] )
lowerCamelCase : List[str] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]:
# layernorm 1
lowerCamelCase : str = weights[0][0][0]
lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] )
lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# lsh weights + output
lowerCamelCase : List[Any] = weights[0][1]
if len(UpperCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
else:
set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
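# LSH attention shares a single matrix for query and key, so its trax weights
# arrive as 3 arrays (query_key, value, out); local attention keeps query and
# key separate, giving 4 arrays (query, key, value, out). The length check
# above distinguishes the two.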
# intermediate weights
lowerCamelCase : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCamelCase__ ) == 4:
lowerCamelCase : Dict = intermediate_weights[2]
# layernorm 2
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] )
lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# intermediate dense
lowerCamelCase : Optional[Any] = np.asarray(intermediate_weights[1][0] )
lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# intermediate out
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] )
lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]:
# reformer model
lowerCamelCase : List[Any] = torch_model.reformer
# word embeds
lowerCamelCase : Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , )
if isinstance(weights[3] , UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) )
lowerCamelCase : int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# output layer norm
lowerCamelCase : Any = np.asarray(weights[7][0] )
lowerCamelCase : List[str] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# output embeddings
lowerCamelCase : List[Any] = np.asarray(weights[9][0] )
lowerCamelCase : Optional[int] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]:
# Initialise PyTorch model
lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ )
with open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""]
set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase :Optional[int] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 42
| 1
|
"""simple docstring"""
def snake_case ( UpperCamelCase__ : int = 1000000 ) -> int:
lowerCamelCase : str = 1
lowerCamelCase : Union[str, Any] = 1
lowerCamelCase : List[str] = {1: 1}
for inputa in range(2 , UpperCamelCase__ ):
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : int = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
lowerCamelCase : List[str] = (3 * number) + 1
counter += 1
if inputa not in counters:
lowerCamelCase : int = counter
if counter > pre_counter:
lowerCamelCase : Dict = inputa
lowerCamelCase : Any = counter
return largest_number
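# Example chain (Project Euler 14): 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
# is 10 terms long. The counters dict memoizes chain lengths, so as soon as the
# walk reaches a previously seen number its stored count is reused instead of
# re-walking the tail of the chain.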
if __name__ == "__main__":
print(solution(int(input().strip())))
| 42
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A__ ( nn.Module):
"""simple docstring"""
def __init__( self: Dict )-> Dict:
super().__init__()
lowerCamelCase : Tuple = nn.Linear(3 , 4 )
lowerCamelCase : Optional[Any] = nn.BatchNormad(4 )
lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 )
def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple:
return (args[0] + 1,) + args[1:], kwargs
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]:
return output + 1
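# The two toy hooks above shift values by +1: PreForwardHook bumps the first
# positional input before forward, PostForwardHook bumps the output afterwards.
# The tests below rely on this to verify hook replacement and chaining semantics.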
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Dict = ModelHook()
add_hook_to_module(__a , __a )
self.assertEqual(test_model._hf_hook , __a )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Union[str, Any] = ModelHook()
add_hook_to_module(__a , __a )
add_hook_to_module(__a , __a , append=__a )
self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: List[Any] )-> List[str]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Union[str, Any] = test_model(x + 1 )
lowerCamelCase : Optional[int] = test_model(x + 2 )
lowerCamelCase : List[Any] = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[int] = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# Attaching a hook to a model that already has one replaces the old hook; it does not chain them
lowerCamelCase : Dict = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
assert torch.allclose(__a , __a , atol=1e-5 )
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : List[str] = torch.randn(2 , 3 )
lowerCamelCase : int = test_model(__a )
lowerCamelCase : Dict = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model that already has one replaces the old hook; it does not chain them
lowerCamelCase : str = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
assert torch.allclose(__a , output + 2 , atol=1e-5 )
def a__ ( self: int )-> Dict:
lowerCamelCase : List[Any] = ModelForTest()
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : List[str] = test_model(__a )
lowerCamelCase : Any = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = test_model(__a )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCamelCase : str = torch.randn(2 , 3 )
lowerCamelCase : Dict = model(__a )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
lowerCamelCase : str = model(__a )
self.assertEqual(output.device , torch.device(0 ) )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will offload each submodule's weights, moving them to the meta device
lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
lowerCamelCase : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : int = torch.randn(2 , 3 )
lowerCamelCase : Optional[int] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will offload each submodule's weights, moving them to the meta device
lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__a , execution_device=__a , offload=__a )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will offload each submodule's weights, moving them to the meta device
lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Tuple = torch.randn(2 , 3 )
lowerCamelCase : Any = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 42
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :List[str] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Dict ='''openai-gpt'''
snake_case__ : Any ={
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self: List[Any] , __a: str=40_478 , __a: str=512 , __a: Any=768 , __a: Any=12 , __a: Any=12 , __a: Optional[Any]="gelu" , __a: Optional[int]=0.1 , __a: List[str]=0.1 , __a: Any=0.1 , __a: Dict=1e-5 , __a: Optional[int]=0.02 , __a: List[Any]="cls_index" , __a: List[str]=True , __a: Dict=None , __a: Optional[Any]=True , __a: Any=0.1 , **__a: Tuple , )-> Dict:
lowerCamelCase : Any = vocab_size
lowerCamelCase : List[Any] = n_positions
lowerCamelCase : List[Any] = n_embd
lowerCamelCase : Optional[int] = n_layer
lowerCamelCase : Optional[Any] = n_head
lowerCamelCase : List[Any] = afn
lowerCamelCase : List[Any] = resid_pdrop
lowerCamelCase : int = embd_pdrop
lowerCamelCase : int = attn_pdrop
lowerCamelCase : List[Any] = layer_norm_epsilon
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Tuple = summary_type
lowerCamelCase : Union[str, Any] = summary_use_proj
lowerCamelCase : Optional[Any] = summary_activation
lowerCamelCase : Dict = summary_first_dropout
lowerCamelCase : Any = summary_proj_to_labels
super().__init__(**__a )
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase :Optional[Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
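# Lazy-import pattern: at runtime the module body only registers _import_structure;
# the actual submodule imports happen on first attribute access via _LazyModule,
# while the TYPE_CHECKING branch gives static type checkers eager imports.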
| 42
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[int] =['''image_processor''', '''tokenizer''']
snake_case__ : Optional[int] ='''ViltImageProcessor'''
snake_case__ : Dict =('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self: List[Any] , __a: Optional[Any]=None , __a: Optional[Any]=None , **__a: Any )-> Any:
lowerCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __a , )
lowerCamelCase : Dict = kwargs.pop("""feature_extractor""" )
lowerCamelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__a , __a )
lowerCamelCase : Dict = self.image_processor
def __call__( self: int , __a: Optional[int] , __a: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a: bool = True , __a: Union[bool, str, PaddingStrategy] = False , __a: Union[bool, str, TruncationStrategy] = None , __a: Optional[int] = None , __a: int = 0 , __a: Optional[int] = None , __a: Optional[bool] = None , __a: Optional[bool] = None , __a: bool = False , __a: bool = False , __a: bool = False , __a: bool = False , __a: bool = True , __a: Optional[Union[str, TensorType]] = None , **__a: Dict , )-> BatchEncoding:
lowerCamelCase : List[Any] = self.tokenizer(
text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
# add pixel_values + pixel_mask
lowerCamelCase : Dict = self.image_processor(__a , return_tensors=__a )
encoding.update(__a )
return encoding
def a__ ( self: str , *__a: Dict , **__a: Optional[int] )-> int:
return self.tokenizer.batch_decode(*__a , **__a )
def a__ ( self: Union[str, Any] , *__a: Optional[int] , **__a: int )-> List[Any]:
return self.tokenizer.decode(*__a , **__a )
@property
def a__ ( self: Any )-> Dict:
lowerCamelCase : Optional[Any] = self.tokenizer.model_input_names
lowerCamelCase : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def a__ ( self: List[str] )-> List[Any]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __a , )
return self.image_processor_class
@property
def a__ ( self: List[str] )-> Optional[int]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __a , )
return self.image_processor
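# Usage sketch (checkpoint name assumed; mirrors how the upstream ViltProcessor
# is typically called): a single call prepares both modalities at once, e.g.
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   inputs = processor(images=image, text="How many cats are there?", return_tensors="pt")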
| 42
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
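# With out_features=None the backbone falls back to its last stage only,
# which is why the second half of this check expects exactly one feature map
# and a single channel entry.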
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> Optional[int]:
lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
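# Slice checks like this pin only the first three logits of the reference
# checkpoint; atol=1e-4 absorbs minor numeric drift across hardware while
# still catching weight-loading or preprocessing regressions.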
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
lowerCamelCase : Dict = ConvNextModelTester(self )
| 42
| 1
|
"""simple docstring"""
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : bool = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# deterministic witness bounds from the literature; entries of 1 mark prime
# counts with no published improvement, so the loop falls through to the next bound
lowerCamelCase : Tuple = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
lowerCamelCase : str = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(UpperCamelCase__ , 1 ):
if n < _p:
# then we have our last prime to check
lowerCamelCase : List[str] = primes[:idx]
break
lowerCamelCase , lowerCamelCase : List[Any] = n - 1, 0
# break up n - 1 into a power of 2 (s) and a
# remaining odd component d
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCamelCase : List[str] = False
for r in range(UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = pow(UpperCamelCase__ , d * 2**r , UpperCamelCase__ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCamelCase : Any = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and n MUST be composite
return False
return True
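# Worked example of the d * 2**s decomposition above: for n = 561,
# n - 1 = 560 = 35 * 2**4, so d = 35 and s = 4; each witness a is then
# checked via pow(a, 35 * 2**r, 561) for r = 0..3.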
def snake_case ( ) -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] ='''realm'''
def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
# Common config
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Dict = retriever_proj_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Tuple = num_candidates
lowerCamelCase : int = intermediate_size
lowerCamelCase : Dict = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : Optional[Any] = layer_norm_eps
# Reader config
lowerCamelCase : List[str] = span_hidden_size
lowerCamelCase : Dict = max_span_width
lowerCamelCase : Optional[Any] = reader_layer_norm_eps
lowerCamelCase : Optional[int] = reader_beam_size
lowerCamelCase : List[Any] = reader_seq_len
# Retrieval config
lowerCamelCase : int = num_block_records
lowerCamelCase : Dict = searcher_beam_size
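# REALM-specific knobs above: num_candidates is how many retrieved evidence
# blocks are scored per query, num_block_records is the size of the evidence
# corpus (the released checkpoints index ~13.35M blocks, matching the
# default), and the searcher/reader beam sizes bound how many blocks survive
# retrieval and reading.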
| 42
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =OpenAIGPTTokenizer
snake_case__ : Tuple =OpenAIGPTTokenizerFast
snake_case__ : Any =True
snake_case__ : str =False
def a__ ( self: Tuple )-> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
lowerCamelCase : Optional[Any] = dict(zip(__a , range(len(__a ) ) ) )
lowerCamelCase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__a ) )
def a__ ( self: Dict , __a: Dict )-> str:
return "lower newer", "lower newer"
def a__ ( self: int )-> int:
lowerCamelCase : List[Any] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase : str = """lower"""
lowerCamelCase : str = ["""low""", """er</w>"""]
lowerCamelCase : List[Any] = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Union[str, Any] = tokens + ["""<unk>"""]
lowerCamelCase : Tuple = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def a__ ( self: List[str] , __a: Optional[int]=15 )-> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
lowerCamelCase : List[Any] = """This is a simple input"""
lowerCamelCase : Dict = ["""This is a simple input 1""", """This is a simple input 2"""]
lowerCamelCase : Any = ("""This is a simple input""", """This is a pair""")
lowerCamelCase : Any = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="""max_length""" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="""max_length""" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="""max_length""" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="""max_length""" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="""max_length""" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="""max_length""" , )
def a__ ( self: Optional[Any] )-> List[str]:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class A__ ( __lowercase):
"""simple docstring"""
pass
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''glpn'''
def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
super().__init__(**__a )
lowerCamelCase : Dict = num_channels
lowerCamelCase : Any = num_encoder_blocks
lowerCamelCase : Dict = depths
lowerCamelCase : List[str] = sr_ratios
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : Optional[Any] = mlp_ratios
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[Any] = decoder_hidden_size
lowerCamelCase : Tuple = max_depth
lowerCamelCase : Optional[Any] = head_in_index
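# The encoder configured above is SegFormer-style: num_encoder_blocks stages
# whose sr_ratios shrink the key/value length of self-attention at high
# resolution, while decoder_hidden_size and max_depth parameterize GLPN's
# monocular depth head (max_depth bounds the predicted depth).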
| 42
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> Optional[int]:
lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
lowerCamelCase : Dict = ConvNextModelTester(self )
| 42
|
"""simple docstring"""
from __future__ import annotations
import math
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : int ) -> float:
lowerCamelCase : Dict = u
for i in range(1 , UpperCamelCase__ ):
lowerCamelCase : List[str] = temp * (u - i)
return temp
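# The helper above computes the falling product u*(u-1)*...*(u-i+1), the
# numerator of the binomial coefficient C(u, i) in Newton's forward-difference
# formula: f(x) ~= sum_i [u(u-1)...(u-i+1) / i!] * (delta^i y_0), with
# u = (x - x_0) / h.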
def snake_case ( ) -> None:
lowerCamelCase : List[Any] = int(input("""enter the number of values: """ ) )
lowerCamelCase : list[list[float]] = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = 0
print("""enter the values of parameters in a list: """ )
lowerCamelCase : Any = list(map(UpperCamelCase__ , input().split() ) )
print("""enter the values of corresponding parameters: """ )
for i in range(UpperCamelCase__ ):
lowerCamelCase : int = float(input() )
lowerCamelCase : Dict = int(input("""enter the value to interpolate: """ ) )
lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
lowerCamelCase : str = y[j + 1][i - 1] - y[j][i - 1]
lowerCamelCase : Any = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
| 42
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =StableDiffusionXLImgaImgPipeline
snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ ( self: List[str] )-> int:
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase : Any = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
lowerCamelCase : Dict = CLIPTextModel(__a )
lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
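# SDXL pairs two text encoders: the plain CLIP text model supplies token-level
# embeddings, while the projection variant (text_encoder_2) also yields the
# pooled embedding consumed by the "text_time" additional conditioning set on
# the UNet above.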
def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase : Any = image / 2 + 0.5
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Dict = torch.manual_seed(__a )
else:
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
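# Comparing a 3x3 corner slice of the output against hard-coded values is a
# cheap regression check for the tiny dummy pipeline, not a full-image
# comparison; the 1e-2 tolerance leaves room for platform-dependent numerics.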
def a__ ( self: Optional[int] )-> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ ( self: Optional[Any] )-> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self: List[str] )-> Optional[Any]:
pass
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : str = sd_pipe.to(__a )
lowerCamelCase : Any = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
# forward without prompt embeds
lowerCamelCase : Dict = self.get_dummy_inputs(__a )
lowerCamelCase : Any = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Optional[int] = negative_prompt
lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
lowerCamelCase : List[Any] = sd_pipe(**__a )
lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a )
lowerCamelCase : int = sd_pipe(
**__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]:
lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_inputs(__a )
lowerCamelCase : Optional[Any] = pipe(**__a ).images
lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowerCamelCase :str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
__lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__lowerCamelCase :Union[str, Any] = get_logger(__name__)
class A__ :
"""simple docstring"""
def __init__( self: Dict , __a: Optional[str] = None )-> Tuple:
lowerCamelCase : Optional[Any] = (
os.path.join(__a , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCamelCase : Tuple = Extractor
def a__ ( self: Optional[int] , __a: str )-> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
lowerCamelCase : List[str] = os.path.abspath(__a )
return os.path.join(self.extract_dir , hash_url_to_filename(__a ) )
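# Example: extracting "/data/archive.tar.gz" lands in
# <extract_dir>/<hash of the absolute path>, so re-extracting the same path
# is a cache hit while the same archive reached via another path gets its
# own entry.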
def a__ ( self: str , __a: str , __a: bool )-> bool:
return force_extract or (
not os.path.isfile(__a ) and not (os.path.isdir(__a ) and os.listdir(__a ))
)
def a__ ( self: str , __a: str , __a: bool = False )-> str:
lowerCamelCase : Optional[int] = self.extractor.infer_extractor_format(__a )
if not extractor_format:
return input_path
lowerCamelCase : List[str] = self._get_output_path(__a )
if self._do_extract(__a , __a ):
self.extractor.extract(__a , __a , __a )
return output_path
class A__ ( __lowercase):
"""simple docstring"""
@classmethod
@abstractmethod
def a__ ( cls: Union[str, Any] , __a: Union[Path, str] , **__a: Union[str, Any] )-> bool:
...
@staticmethod
@abstractmethod
def a__ ( __a: Union[Path, str] , __a: Union[Path, str] )-> None:
...
class A__ ( __lowercase , __lowercase):
"""simple docstring"""
snake_case__ : List[bytes] =[]
@staticmethod
def a__ ( __a: Union[Path, str] , __a: int )-> List[Any]:
with open(__a , """rb""" ) as f:
return f.read(__a )
@classmethod
def a__ ( cls: Dict , __a: Union[Path, str] , __a: bytes = b"" )-> bool:
if not magic_number:
lowerCamelCase : Dict = max(len(__a ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase : str = cls.read_magic_number(__a , __a )
except OSError:
return False
return any(magic_number.startswith(__a ) for cls_magic_number in cls.magic_numbers )
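# Magic-number dispatch: each extractor declares the leading byte signatures
# of its format (b"\x1F\x8B" for gzip, b"PK\x03\x04" for zip, and so on), so
# sniffing the first bytes identifies the archive type without trusting the
# file extension.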
class A__ ( __lowercase):
"""simple docstring"""
@classmethod
def a__ ( cls: Any , __a: Union[Path, str] , **__a: Dict )-> bool:
return tarfile.is_tarfile(__a )
@staticmethod
def a__ ( __a: Any , __a: Any )-> int:
def resolved(__a: str ) -> str:
return os.path.realpath(os.path.abspath(__a ) )
def badpath(__a: str , __a: str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__a , __a ) ).startswith(__a )
def badlink(__a: int , __a: str ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase : Optional[int] = resolved(os.path.join(__a , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__a )
lowerCamelCase : Dict = resolved(__a )
for finfo in members:
if badpath(finfo.name , __a ):
logger.error(f'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(__a , __a ):
logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(__a , __a ):
logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def a__ ( __a: Union[Path, str] , __a: Union[Path, str] )-> None:
os.makedirs(__a , exist_ok=__a )
lowerCamelCase : Any = tarfile.open(__a )
tar_file.extractall(__a , members=TarExtractor.safemembers(__a , __a ) )
tar_file.close()
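# safemembers above screens tar entries before extraction: member names that
# resolve outside the destination, and sym/hard links pointing outside it,
# are logged and skipped, guarding against tar path-traversal ("zip-slip")
# attacks.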
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple =[B'''\x1F\x8B''']
@staticmethod
def a__ ( __a: Union[Path, str] , __a: Union[Path, str] )-> None:
with gzip.open(__a , """rb""" ) as gzip_file:
with open(__a , """wb""" ) as extracted_file:
shutil.copyfileobj(__a , __a )
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Any =[
B'''PK\x03\x04''',
B'''PK\x05\x06''', # empty archive
B'''PK\x07\x08''', # spanned archive
]
@classmethod
def a__ ( cls: Any , __a: Union[Path, str] , __a: bytes = b"" )-> bool:
if super().is_extractable(__a , magic_number=__a ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__a , """rb""" ) as fp:
lowerCamelCase : Dict = _EndRecData(__a )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCamelCase : Union[str, Any] = fp.read(__a ) # CD is where we expect it to be
if len(__a ) == sizeCentralDir:
lowerCamelCase : int = struct.unpack(__a , __a ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def a__ ( __a: Union[Path, str] , __a: Union[Path, str] )-> None:
os.makedirs(__a , exist_ok=__a )
with zipfile.ZipFile(__a , """r""" ) as zip_file:
zip_file.extractall(__a )
zip_file.close()
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Dict =[B'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def a__ ( __a: Union[Path, str] , __a: Union[Path, str] )-> None:
with lzma.open(__a ) as compressed_file:
with open(__a , """wb""" ) as extracted_file:
shutil.copyfileobj(__a , __a )
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : int =[B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def a__ ( __a: Union[Path, str] , __a: Union[Path, str] )-> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__a , exist_ok=__a )
lowerCamelCase : Optional[int] = rarfile.RarFile(__a )
rf.extractall(__a )
rf.close()
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =[B'''\x28\xb5\x2F\xFD''']
@staticmethod
def a__ ( __a: Union[Path, str] , __a: Union[Path, str] )-> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowerCamelCase : Tuple = zstd.ZstdDecompressor()
with open(__a , """rb""" ) as ifh, open(__a , """wb""" ) as ofh:
dctx.copy_stream(__a , __a )
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[int] =[B'''\x42\x5A\x68''']
@staticmethod
def a__ ( __a: Union[Path, str] , __a: Union[Path, str] )-> None:
with bz2.open(__a , """rb""" ) as compressed_file:
with open(__a , """wb""" ) as extracted_file:
shutil.copyfileobj(__a , __a )
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : str =[B'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def a__ ( __a: Union[Path, str] , __a: Union[Path, str] )-> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import py7zr
os.makedirs(__a , exist_ok=__a )
with py7zr.SevenZipFile(__a , """r""" ) as archive:
archive.extractall(__a )
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] =[B'''\x04\x22\x4D\x18''']
@staticmethod
def a__ ( __a: Union[Path, str] , __a: Union[Path, str] )-> None:
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lz4.frame
with lz4.frame.open(__a , """rb""" ) as compressed_file:
with open(__a , """wb""" ) as extracted_file:
shutil.copyfileobj(__a , __a )
class A__ :
"""simple docstring"""
# Put the zip extractor last, because files of other formats (e.g. tar or gzip) can be wrongly detected as zip
snake_case__ : Dict[str, Type[BaseExtractor]] ={
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def a__ ( cls: Optional[int] )-> List[str]:
return max(
len(__a )
for extractor in cls.extractors.values()
if issubclass(__a , __a )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def a__ ( __a: Union[Path, str] , __a: int )-> List[Any]:
try:
return MagicNumberBaseExtractor.read_magic_number(__a , magic_number_length=__a )
except OSError:
return b""
@classmethod
def a__ ( cls: Optional[int] , __a: Union[Path, str] , __a: bool = False )-> bool:
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=__a , )
lowerCamelCase : Dict = cls.infer_extractor_format(__a )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def a__ ( cls: Tuple , __a: Union[Path, str] )-> str: # <Added version="2.4.0"/>
lowerCamelCase : Optional[int] = cls._get_magic_number_max_length()
lowerCamelCase : int = cls._read_magic_number(__a , __a )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__a , magic_number=__a ):
return extractor_format
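# For instance, infer_extractor_format on a zip archive reads its first
# bytes, matches ZipExtractor's b"PK\x03\x04" signature and returns "zip";
# if no extractor matches, the loop falls through and the method returns None.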
@classmethod
def a__ ( cls: Any , __a: Union[Path, str] , __a: Union[Path, str] , __a: Optional[str] = None , __a: Optional[BaseExtractor] = "deprecated" , )-> None:
os.makedirs(os.path.dirname(__a ) , exist_ok=__a )
# Prevent parallel extractions
lowerCamelCase : int = str(Path(__a ).with_suffix(""".lock""" ) )
with FileLock(__a ):
shutil.rmtree(__a , ignore_errors=__a )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__a , __a ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=__a , )
lowerCamelCase : Any = extractor if extractor != """deprecated""" else extractor_format
else:
lowerCamelCase : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(__a , __a )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__a ):
return extractor.extract(__a , __a )
| 42
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict:
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ )
else:
lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 192:
lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 256:
lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 384:
lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ )
from_model.eval()
lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
lowerCamelCase : Tuple = OrderedDict()
lowerCamelCase : Optional[Any] = from_model.state_dict()
lowerCamelCase : str = list(from_model.state_dict().keys() )
lowerCamelCase : List[Any] = list(our_model.state_dict().keys() )
print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : str = weights[og_keys[i]]
our_model.load_state_dict(UpperCamelCase__ )
lowerCamelCase : int = torch.randn((2, 3, 224, 224) )
lowerCamelCase : Any = from_model(UpperCamelCase__ )
lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one."
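# Running both networks on one random batch and asserting allclose on the
# logits is the end-to-end check that the positional, key-by-key weight copy
# above preserved the computation.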
lowerCamelCase : Dict = name
print(UpperCamelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowerCamelCase : Optional[int] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]:
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : List[Any] = 1000
lowerCamelCase : Dict = (1, num_labels)
lowerCamelCase : List[Any] = """huggingface/label-files"""
lowerCamelCase : Optional[int] = num_labels
lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase : Any = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase : List[Any] = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
lowerCamelCase : Optional[int] = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
lowerCamelCase : List[Any] = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert; it must be one of the supported Levit* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
__lowerCamelCase :List[Any] = parser.parse_args()
__lowerCamelCase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 42
| 1
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Dict = np.asarray(weights[0] )
lowerCamelCase : List[Any] = np.asarray(weights[1] )
lowerCamelCase : List[str] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Tuple = np.asarray(weights[0] )
lowerCamelCase : Any = np.asarray(weights[1] )
lowerCamelCase : List[Any] = np.asarray(weights[2] )
lowerCamelCase : List[str] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]:
# layernorm 1
lowerCamelCase : str = weights[0][0][0]
lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] )
lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# lsh weights + output
lowerCamelCase : List[Any] = weights[0][1]
if len(UpperCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
else:
set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
# intermediate weights
lowerCamelCase : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCamelCase__ ) == 4:
lowerCamelCase : Dict = intermediate_weights[2]
# layernorm 2
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] )
lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# intermediate dense
lowerCamelCase : Optional[Any] = np.asarray(intermediate_weights[1][0] )
lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# intermediate out
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] )
lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]:
# reformer model
lowerCamelCase : List[Any] = torch_model.reformer
# word embeds
lowerCamelCase : Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , )
if isinstance(weights[3] , UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) )
lowerCamelCase : int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# output layer norm
lowerCamelCase : Any = np.asarray(weights[7][0] )
lowerCamelCase : List[str] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# output embeddings
lowerCamelCase : List[Any] = np.asarray(weights[9][0] )
lowerCamelCase : Optional[int] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]:
# Initialise PyTorch model
lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ )
with open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""]
set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the Trax model pickle (.pkl) file.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase :Optional[int] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
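# Hedged sketch of the core move every set_* helper above performs: copy a
# NumPy weight from the Trax checkpoint into a torch module, transposing dense
# kernels because Trax stores (in, out) while torch.nn.Linear expects
# (out, in). The helper name below is ours, not part of the script above.
import numpy as np
import torch
from torch import nn

def copy_dense(layer: nn.Linear, kernel: np.ndarray, bias: np.ndarray) -> None:
    weight = torch.tensor(kernel).transpose(0, 1).contiguous()
    assert layer.weight.shape == weight.shape, "kernel shape mismatch"
    layer.weight = nn.Parameter(weight)
    layer.bias = nn.Parameter(torch.tensor(bias))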
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple =(KDPMaDiscreteScheduler,)
snake_case__ : Tuple =10
def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]:
lowerCamelCase : int = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**__a )
return config
def a__ ( self: Union[str, Any] )-> Any:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__a )
def a__ ( self: str )-> int:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def a__ ( self: int )-> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def a__ ( self: List[Any] )-> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase : List[str] = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase : List[str] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : Dict = self.dummy_model()
lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : List[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : Optional[Any] = output.prev_sample
lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def a__ ( self: Any )-> Any:
if torch_device == "mps":
return
lowerCamelCase : Dict = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config()
lowerCamelCase : int = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : List[Any] = self.dummy_model()
lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : Optional[int] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[Any] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : str = output.prev_sample
lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def a__ ( self: Optional[Any] )-> List[Any]:
if torch_device == "mps":
return
lowerCamelCase : Any = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase : Optional[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
lowerCamelCase : Union[str, Any] = self.dummy_model()
lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : int = scheduler.step(__a , __a , __a )
lowerCamelCase : int = output.prev_sample
lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
lowerCamelCase : int = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
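# All three tests above share the same sampling skeleton; a stripped-down,
# standalone version of that loop (a zero "model" output stands in for a real
# UNet, purely to show the scale_model_input -> model -> step ordering):
import torch
from diffusers import KDPMaDiscreteScheduler  # class name as imported above

def _tiny_sampling_loop(num_inference_steps: int = 5) -> torch.Tensor:
    scheduler = KDPMaDiscreteScheduler(num_train_timesteps=1_100)
    scheduler.set_timesteps(num_inference_steps)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(scaled)  # stand-in for a real model
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample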
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")

def solution() -> int | None:
    # For a 4-digit n, the concatenated product of n with (1, 2) is
    # n * 10**5 + 2 * n = 100002 * n, since 2 * n has 5 digits.
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # For a 3-digit n, the concatenated product with (1, 2, 3) is
    # n * 10**6 + 2 * n * 10**3 + 3 * n = 1002003 * n.
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
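# Quick sanity checks for the concatenation identities used in solution():
assert 100002 * 9327 == int(str(9327) + str(2 * 9327))  # 932718654
assert 1002003 * 192 == int(str(192) + str(2 * 192) + str(3 * 192))  # 192384576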
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =StableDiffusionXLImgaImgPipeline
snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ ( self: List[str] )-> int:
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase : Any = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
lowerCamelCase : Dict = CLIPTextModel(__a )
lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase : Any = image / 2 + 0.5
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Dict = torch.manual_seed(__a )
else:
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ ( self: Optional[Any] )-> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self: List[str] )-> Optional[Any]:
pass
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : str = sd_pipe.to(__a )
lowerCamelCase : Any = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
# forward without prompt embeds
lowerCamelCase : Dict = self.get_dummy_inputs(__a )
lowerCamelCase : Any = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Optional[int] = negative_prompt
lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
lowerCamelCase : List[Any] = sd_pipe(**__a )
lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a )
lowerCamelCase : int = sd_pipe(
**__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]:
lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_inputs(__a )
lowerCamelCase : Optional[Any] = pipe(**__a ).images
lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
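# The tests above pin determinism by seeding a per-device torch.Generator,
# falling back to the global CPU generator on MPS. The same pattern in
# isolation; the helper name is ours, not part of the test suite:
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # device generators are unsupported on MPS here
    return torch.Generator(device=device).manual_seed(seed)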
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 2, i.e. once a*a + b*b exceeds 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)

def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)

def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))

def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
__lowerCamelCase :Optional[Any] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
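# The escape-time loop in get_distance iterates z_{n+1} = z_n**2 + c with
# z = a + b*i and c = x + y*i; a quick check that the real/imaginary updates
# above agree with Python's built-in complex arithmetic:
_z, _c = complex(0.1, 0.2), complex(-0.6, 0.4)
_expected = complex(
    _z.real * _z.real - _z.imag * _z.imag + _c.real,  # a' = a*a - b*b + x
    2 * _z.real * _z.imag + _c.imag,                  # b' = 2*a*b + y
)
assert abs(_z * _z + _c - _expected) < 1e-12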
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Tuple , __a: Optional[int] )-> List[str]:
return None
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Tuple , __a: str , __a: str , __a: str )-> Tuple:
return None
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =[
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self: Optional[Any] )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """tf""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: str )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """pt""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: Union[str, Any] )-> Dict:
from transformers import BertModel
lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__a ) )
vocab_file.flush()
lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
model.save_pretrained(__a )
self._test_export(__a , """pt""" , 12 , __a )
@require_tf
@slow
def a__ ( self: Optional[Any] )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
lowerCamelCase : Tuple = quantize(Path(__a ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def a__ ( self: Any )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
lowerCamelCase : Dict = quantize(__a )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any:
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__a , __a , __a , __a , __a , **__a )
return path
except Exception as e:
self.fail(__a )
@require_torch
@require_tokenizers
@slow
def a__ ( self: Tuple )-> Dict:
from transformers import BertModel
lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """pt""" )
@require_tf
@require_tokenizers
@slow
def a__ ( self: Optional[Any] )-> List[Any]:
from transformers import TFBertModel
lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """tf""" )
def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]:
lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a )
lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a )
# Assert all variables are present
self.assertEqual(len(__a ) , len(__a ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __a )
self.assertSequenceEqual(variable_names[3:] , __a )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def a__ ( self: List[Any] )-> int:
lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__a ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__a ) , set(__a ) )
# Parameters should be reordered according to their respective places in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )
# Should have exactly one arg (everything before the missing "some_other_args")
self.assertEqual(len(__a ) , 1 )
self.assertEqual(len(__a ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
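# ensure_valid_input (exercised above) reorders the tokenizer's outputs to
# match the model's positional signature and stops at the first parameter the
# tokenizer did not produce. A stripped-down illustration of that contract,
# not the transformers implementation itself:
import inspect

def reorder_inputs(forward_fn, tokens: dict):
    names, values = [], []
    for name in inspect.signature(forward_fn).parameters:
        if name == "self":
            continue
        if name not in tokens:
            break  # everything after a missing argument is dropped
        names.append(name)
        values.append(tokens[name])
    return names, tuple(values)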
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
"""simple docstring"""
def __init__( self: str , __a: Dict , __a: Any=13 , __a: Any=7 , __a: List[str]=True , __a: Union[str, Any]=True , __a: int=False , __a: Optional[Any]=True , __a: Optional[Any]=99 , __a: int=32 , __a: Optional[Any]=5 , __a: Dict=4 , __a: List[Any]=37 , __a: Any="gelu" , __a: Any=0.1 , __a: Any=0.1 , __a: Optional[Any]=512 , __a: List[Any]=16 , __a: List[str]=2 , __a: List[str]=0.02 , __a: int=3 , __a: Dict=4 , __a: Dict=None , )-> List[str]:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : str = batch_size
lowerCamelCase : Tuple = seq_length
lowerCamelCase : str = is_training
lowerCamelCase : List[str] = use_input_mask
lowerCamelCase : Tuple = use_token_type_ids
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[Any] = vocab_size
lowerCamelCase : str = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : Any = num_attention_heads
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : Optional[int] = hidden_dropout_prob
lowerCamelCase : Any = attention_probs_dropout_prob
lowerCamelCase : List[Any] = max_position_embeddings
lowerCamelCase : int = type_vocab_size
lowerCamelCase : Union[str, Any] = type_sequence_label_size
lowerCamelCase : Any = initializer_range
lowerCamelCase : str = num_labels
lowerCamelCase : int = num_choices
lowerCamelCase : Optional[int] = scope
def a__ ( self: Dict )-> Optional[int]:
lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : List[str] = None
if self.use_input_mask:
lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase : int = None
lowerCamelCase : int = None
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Union[str, Any] )-> Tuple:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def a__ ( self: Any , __a: List[str] , __a: Tuple , __a: Optional[Any] , __a: int , __a: str , __a: List[str] , __a: Optional[int] )-> Union[str, Any]:
lowerCamelCase : Tuple = BioGptModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a , attention_mask=__a )
lowerCamelCase : Any = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Union[str, Any] , __a: Optional[Any] , __a: Tuple , __a: Tuple , __a: Optional[int] , __a: Dict , __a: Union[str, Any] , )-> int:
lowerCamelCase : Dict = BioGptForCausalLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: Tuple , __a: Optional[int] , __a: int , __a: List[Any] , __a: Tuple , __a: Tuple , *__a: Any )-> Optional[Any]:
lowerCamelCase : Optional[int] = BioGptModel(config=__a )
model.to(__a )
model.eval()
# create attention mask
lowerCamelCase : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=__a )
lowerCamelCase : str = self.seq_length // 2
lowerCamelCase : int = 0
# first forward pass
lowerCamelCase , lowerCamelCase : List[Any] = model(__a , attention_mask=__a ).to_tuple()
# create a hypothetical next token and extend it to next_input_ids
lowerCamelCase : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
lowerCamelCase : Any = ids_tensor((1,) , __a ).item() + 1
lowerCamelCase : int = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
lowerCamelCase : Tuple = random_other_next_tokens
# append to next input_ids and attn_mask
lowerCamelCase : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__a )] , dim=1 , )
# get two different outputs
lowerCamelCase : Union[str, Any] = model(__a , attention_mask=__a )["""last_hidden_state"""]
lowerCamelCase : Tuple = model(__a , past_key_values=__a , attention_mask=__a )["""last_hidden_state"""]
# select random slice
lowerCamelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase : Dict = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase : Tuple = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
def a__ ( self: Union[str, Any] , __a: Optional[int] , __a: int , __a: Tuple , __a: List[str] , __a: Dict , *__a: int )-> List[str]:
lowerCamelCase : str = BioGptModel(config=__a ).to(__a ).eval()
lowerCamelCase : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=__a )
# first forward pass
lowerCamelCase : List[str] = model(__a , attention_mask=__a , use_cache=__a )
lowerCamelCase , lowerCamelCase : Optional[Any] = outputs.to_tuple()
# create multiple hypothetical next tokens and extend them to next_input_ids
lowerCamelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
lowerCamelCase : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase : Tuple = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCamelCase : int = model(__a , attention_mask=__a )["""last_hidden_state"""]
lowerCamelCase : Optional[Any] = model(__a , attention_mask=__a , past_key_values=__a )[
"""last_hidden_state"""
]
# select random slice
lowerCamelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
def a__ ( self: Optional[Any] , __a: Dict , __a: Optional[int] , __a: List[Any] , __a: List[Any] , __a: Dict , *__a: List[str] , __a: Tuple=False )-> Optional[int]:
lowerCamelCase : Dict = BioGptForCausalLM(__a )
model.to(__a )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowerCamelCase : Optional[int] = model(__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def a__ ( self: Any , __a: int , *__a: Any )-> str:
lowerCamelCase : List[str] = BioGptModel(__a )
lowerCamelCase : Any = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def a__ ( self: Dict , __a: Tuple , __a: Any , __a: Union[str, Any] , __a: Optional[Any] , __a: Optional[int] , *__a: Tuple )-> int:
lowerCamelCase : str = self.num_labels
lowerCamelCase : str = BioGptForTokenClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , attention_mask=__a , token_type_ids=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Dict )-> Any:
lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : int = config_and_inputs
lowerCamelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[Any] =(
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
snake_case__ : Union[str, Any] =(BioGptForCausalLM,) if is_torch_available() else ()
snake_case__ : Dict =(
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : List[Any] =False
def a__ ( self: Union[str, Any] )-> Union[str, Any]:
lowerCamelCase : Any = BioGptModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: int )-> int:
self.config_tester.run_common_tests()
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Optional[Any] )-> Optional[Any]:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Optional[Any] = type
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__a )
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__a , gradient_checkpointing=__a )
def a__ ( self: Union[str, Any] )-> List[Any]:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__a )
def a__ ( self: str )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__a )
@slow
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : Union[str, Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(__a )
lowerCamelCase : Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowerCamelCase : Optional[int] = """left"""
# Define PAD Token = EOS Token
lowerCamelCase : List[str] = tokenizer.eos_token
lowerCamelCase : str = model.config.eos_token_id
# use different length sentences to test batching
lowerCamelCase : int = [
"""Hello, my dog is a little""",
"""Today, I""",
]
lowerCamelCase : Union[str, Any] = tokenizer(__a , return_tensors="""pt""" , padding=__a )
lowerCamelCase : Any = inputs["""input_ids"""].to(__a )
lowerCamelCase : Any = model.generate(
input_ids=__a , attention_mask=inputs["""attention_mask"""].to(__a ) , )
lowerCamelCase : str = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(__a )
lowerCamelCase : List[Any] = model.generate(input_ids=__a )
lowerCamelCase : List[Any] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
lowerCamelCase : List[Any] = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(__a )
lowerCamelCase : Dict = model.generate(input_ids=__a , max_length=model.config.max_length - num_paddings )
lowerCamelCase : List[Any] = tokenizer.batch_decode(__a , skip_special_tokens=__a )
lowerCamelCase : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a )
lowerCamelCase : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=__a )
lowerCamelCase : Optional[Any] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
@slow
def a__ ( self: Dict )-> Tuple:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Optional[Any] = BioGptModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> str:
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Dict = input_dict["""input_ids"""]
lowerCamelCase : str = input_ids.ne(1 ).to(__a )
lowerCamelCase : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase : Tuple = BioGptForSequenceClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self: Tuple )-> str:
lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Dict = 3
lowerCamelCase : Optional[Any] = """multi_label_classification"""
lowerCamelCase : Optional[Any] = input_dict["""input_ids"""]
lowerCamelCase : Any = input_ids.ne(1 ).to(__a )
lowerCamelCase : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase : List[Any] = BioGptForSequenceClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[int] = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@slow
def a__ ( self: str )-> Tuple:
lowerCamelCase : Dict = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
lowerCamelCase : List[str] = torch.tensor([[2, 4_805, 9, 656, 21]] )
lowerCamelCase : Optional[int] = model(__a )[0]
lowerCamelCase : int = 42_384
lowerCamelCase : Dict = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __a )
lowerCamelCase : int = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def a__ ( self: Union[str, Any] )-> Dict:
lowerCamelCase : str = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowerCamelCase : int = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(__a )
torch.manual_seed(0 )
lowerCamelCase : Dict = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(__a )
lowerCamelCase : str = model.generate(
**__a , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=__a , )
lowerCamelCase : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__a )
lowerCamelCase : Dict = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(__a , __a )
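# The batched-generation test above depends on left padding, so that the
# final position of every row holds a real token when generation starts.
# A toy illustration of left-padding a ragged batch (pad id 1 is arbitrary):
def left_pad(batch, pad_id=1):
    width = max(len(row) for row in batch)
    return [[pad_id] * (width - len(row)) + row for row in batch]

assert left_pad([[5, 6, 7], [9]]) == [[5, 6, 7], [1, 1, 9]]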
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
    def test_sorted(self) -> None:
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self) -> None:
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight(self) -> None:
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit(self) -> None:
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self) -> None:
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self) -> None:
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
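# greedy_knapsack.calc_profit (exercised above) is a greedy fill; one common
# variant, sorting by profit/weight ratio and taking items fractionally, is
# sketched below. This is an illustration, not necessarily the library's
# exact code (with the test data above, total weight 42 <= 100, so any
# greedy strategy returns the full profit of 210).
def greedy_fractional(profit, weight, max_weight):
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total = 0.0
    for p, w in items:
        if max_weight <= 0:
            break
        take = min(w, max_weight)
        total += p * take / w
        max_weight -= take
    return total

assert greedy_fractional([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210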
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
__lowerCamelCase :Dict = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
__lowerCamelCase :List[str] = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def snake_case ( UpperCamelCase__ : Union[str, Any] ) -> Dict:
lowerCamelCase : Union[str, Any] = set()
lowerCamelCase : List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase : Union[str, Any] = char
lowerCamelCase : Optional[int] = set(UpperCamelCase__ )
return pairs
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Dict =VOCAB_FILES_NAMES
snake_case__ : Dict =PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self: Optional[int] , __a: List[str] , __a: Any , __a: Union[str, Any]="<s>" , __a: List[Any]="</s>" , __a: str="</s>" , __a: str="<s>" , __a: str="<unk>" , __a: Dict="<pad>" , __a: List[str]="<mask>" , **__a: str , )-> List[str]:
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , **__a , )
lowerCamelCase : Tuple = vocab_file
lowerCamelCase : List[Any] = merges_file
lowerCamelCase : Tuple = {}
lowerCamelCase : List[str] = 0
lowerCamelCase : Union[str, Any] = 1
lowerCamelCase : Tuple = 2
lowerCamelCase : Dict = 3
self.add_from_file(__a )
lowerCamelCase : List[str] = {v: k for k, v in self.encoder.items()}
with open(__a , encoding="""utf-8""" ) as merges_handle:
lowerCamelCase : Optional[int] = merges_handle.read().split("""\n""" )[:-1]
lowerCamelCase : List[Any] = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCamelCase : List[Any] = dict(zip(__a , range(len(__a ) ) ) )
lowerCamelCase : Union[str, Any] = {}
def a__ ( self: Optional[Any] , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase : Optional[Any] = [self.cls_token_id]
lowerCamelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ ( self: str , __a: List[int] , __a: Optional[List[int]] = None , __a: bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def a__ ( self: str , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
lowerCamelCase : int = [self.sep_token_id]
lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a__ ( self: Optional[Any] )-> Optional[int]:
return len(self.encoder )
def a__ ( self: Any )-> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self: Union[str, Any] , __a: Union[str, Any] )-> int:
if token in self.cache:
return self.cache[token]
lowerCamelCase : Tuple = tuple(__a )
lowerCamelCase : List[Any] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
lowerCamelCase : str = get_pairs(__a )
if not pairs:
return token
while True:
lowerCamelCase : Union[str, Any] = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase , lowerCamelCase : int = bigram
lowerCamelCase : List[str] = []
lowerCamelCase : Optional[Any] = 0
while i < len(__a ):
try:
lowerCamelCase : List[str] = word.index(__a , __a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase : Union[str, Any] = j
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase : List[str] = tuple(__a )
lowerCamelCase : int = new_word
if len(__a ) == 1:
break
else:
lowerCamelCase : List[str] = get_pairs(__a )
lowerCamelCase : Tuple = """@@ """.join(__a )
lowerCamelCase : List[Any] = word[:-4]
lowerCamelCase : Union[str, Any] = word
return word
def a__ ( self: Tuple , __a: int )-> Dict:
lowerCamelCase : Optional[Any] = []
lowerCamelCase : Optional[Any] = re.findall(r"""\S+\n?""" , __a )
for token in words:
split_tokens.extend(list(self.bpe(__a ).split(""" """ ) ) )
return split_tokens
def a__ ( self: List[str] , __a: Optional[Any] )-> Dict:
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def a__ ( self: str , __a: Optional[Any] )-> Optional[Any]:
return self.decoder.get(__a , self.unk_token )
def a__ ( self: Any , __a: List[str] )-> int:
lowerCamelCase : int = """ """.join(__a ).replace("""@@ """ , """""" ).strip()
return out_string
def a__ ( self: Dict , __a: str , __a: Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase : int = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase : Optional[int] = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
if os.path.abspath(self.merges_file ) != os.path.abspath(__a ):
copyfile(self.merges_file , __a )
return out_vocab_file, out_merge_file
def a__ ( self: List[str] , __a: Optional[Any] )-> Union[str, Any]:
if isinstance(__a , __a ):
try:
with open(__a , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(__a )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
return
lowerCamelCase : Any = f.readlines()
for lineTmp in lines:
lowerCamelCase : Dict = lineTmp.strip()
lowerCamelCase : Union[str, Any] = line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
lowerCamelCase : Union[str, Any] = line[:idx]
lowerCamelCase : List[str] = len(self.encoder )
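# The bpe() method above repeatedly merges the adjacent pair with the best
# (lowest) merge rank until no known pair remains. A toy version of that loop
# with a made-up merge table, to make the mechanics concrete:
def toy_bpe(word: str, ranks: dict) -> list:
    symbols = list(word)
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        i = pairs.index(best)  # merge the first occurrence only (toy version)
        symbols[i : i + 2] = [best[0] + best[1]]
    return symbols

assert toy_bpe("hello", {("l", "l"): 0, ("e", "ll"): 1}) == ["h", "ell", "o"]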
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor']
__lowerCamelCase :List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
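# The try/except guards above let the package import cleanly when optional
# backends are absent; the same idea in miniature (names illustrative):
try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

_exported = ["OwlViTConfig", "OwlViTProcessor"]
if _torch_available:
    _exported += ["OwlViTModel", "OwlViTForObjectDetection"]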
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase :Any = logging.get_logger(__name__)
__lowerCamelCase :Tuple = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] ='''data2vec-text'''
def __init__( self: List[str] , __a: List[Any]=30_522 , __a: int=768 , __a: Tuple=12 , __a: Tuple=12 , __a: Tuple=3_072 , __a: Union[str, Any]="gelu" , __a: str=0.1 , __a: Any=0.1 , __a: List[Any]=512 , __a: Tuple=2 , __a: List[Any]=0.02 , __a: Union[str, Any]=1e-1_2 , __a: Optional[int]=1 , __a: Dict=0 , __a: Optional[int]=2 , __a: Any="absolute" , __a: Union[str, Any]=True , __a: Union[str, Any]=None , **__a: List[str] , )-> Optional[Any]:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
lowerCamelCase : List[str] = vocab_size
lowerCamelCase : Optional[int] = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Dict = num_attention_heads
lowerCamelCase : int = hidden_act
lowerCamelCase : Any = intermediate_size
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : int = type_vocab_size
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Optional[Any] = layer_norm_eps
lowerCamelCase : Optional[int] = position_embedding_type
lowerCamelCase : Any = use_cache
lowerCamelCase : Optional[int] = classifier_dropout
class A__ ( __lowercase):
"""simple docstring"""
@property
def a__ ( self: Tuple )-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
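# The mapping returned above is what ultimately feeds the dynamic_axes
# argument of torch.onnx.export; spelled out directly, it would look like
# this (a sketch, not the transformers export path itself):
dynamic_axes = {
    "input_ids": {0: "batch", 1: "sequence"},
    "attention_mask": {0: "batch", 1: "sequence"},
}
# torch.onnx.export(model, (input_ids, attention_mask), "model.onnx",
#                   input_names=list(dynamic_axes), dynamic_axes=dynamic_axes)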
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
lowerCamelCase : Dict = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Union[str, Any] = image_size
lowerCamelCase : Optional[int] = patch_size
lowerCamelCase : Any = num_channels
lowerCamelCase : Any = embed_dim
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : List[Any] = depths
lowerCamelCase : Tuple = num_heads
lowerCamelCase : List[Any] = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : str = qkv_bias
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Tuple = drop_path_rate
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Tuple = use_absolute_embeddings
lowerCamelCase : List[str] = patch_norm
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : str = initializer_range
lowerCamelCase : Tuple = is_training
lowerCamelCase : int = scope
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : str = encoder_stride
lowerCamelCase : List[str] = out_features
lowerCamelCase : Optional[int] = out_indices
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def a__ ( self: List[Any] )-> Optional[int]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = model(__a )
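# each of the len(depths) - 1 patch-merging stages quarters the token count and doubles the channel width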
lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase : Dict = None
lowerCamelCase : Dict = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
lowerCamelCase : Optional[Any] = self.type_sequence_label_size
lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self: int )-> Optional[int]:
lowerCamelCase : str = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Tuple =False
snake_case__ : Dict =False
snake_case__ : Dict =False
snake_case__ : Tuple =False
snake_case__ : Optional[int] =False
def a__ ( self: Union[str, Any] )-> Optional[int]:
lowerCamelCase : List[str] = FocalNetModelTester(self )
lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )
def a__ ( self: List[str] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: List[str] )-> Union[str, Any]:
return
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[Any] )-> Dict:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : int = model_class(__a )
lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[str] = outputs.hidden_states
lowerCamelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self: Any )-> Any:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : List[str] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
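# pad the spatial dims up so height and width become multiples of the patch size before checking hidden states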
lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : str = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def a__ ( self: Optional[int] )-> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> Any:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = _config_zero_init(__a )
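# with initializer ranges zeroed out, every trainable non-embedding parameter should be exactly 0.0 or 1.0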
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Optional[int] )-> Optional[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a )
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
snake_case__ : Optional[int] =FocalNetConfig
snake_case__ : str =False
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : str = FocalNetModelTester(self )
| 42
| 1
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__lowerCamelCase :str = logging.getLogger(__name__)
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Any ='''summarization'''
snake_case__ : int =['''loss''']
snake_case__ : Optional[int] =ROUGE_KEYS
snake_case__ : str ='''rouge2'''
def __init__( self: int , __a: List[Any] , **__a: List[Any] )-> Optional[Any]:
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCamelCase : List[Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(__a , num_labels=__a , mode=self.mode , **__a )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
lowerCamelCase : str = Path(self.output_dir ) / """metrics.json"""
lowerCamelCase : Optional[Any] = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
lowerCamelCase : Optional[int] = 0
lowerCamelCase : int = defaultdict(__a )
lowerCamelCase : Union[str, Any] = self.config.model_type
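# FSMT keeps separate source/target vocabularies, so use its target vocab size for the LM-head shape check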
lowerCamelCase : Optional[Any] = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
lowerCamelCase : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
lowerCamelCase : str = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
lowerCamelCase : str = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCamelCase : Optional[Any] = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCamelCase : Optional[int] = get_git_info()["""repo_sha"""]
lowerCamelCase : str = hparams.num_workers
lowerCamelCase : str = None # default to config
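# multilingual MBart has no fixed decoder start token; derive it from the target-language code id in the tokenizer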
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __a ):
lowerCamelCase : Any = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCamelCase : List[str] = self.decoder_start_token_id
lowerCamelCase : str = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
lowerCamelCase : Tuple = False
lowerCamelCase : Tuple = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCamelCase : Optional[Any] = self.hparams.eval_max_gen_length
else:
lowerCamelCase : Optional[Any] = self.model.config.max_length
lowerCamelCase : Optional[int] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def a__ ( self: Any , __a: Dict[str, torch.Tensor] )-> Dict[str, List[str]]:
lowerCamelCase : List[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(__a , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
lowerCamelCase : Dict = True
return readable_batch
def a__ ( self: List[str] , __a: Any , **__a: Any )-> List[Any]:
return self.model(__a , **__a )
def a__ ( self: int , __a: List[int] )-> str:
lowerCamelCase : Union[str, Any] = self.tokenizer.batch_decode(
__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
return lmap(str.strip , __a )
def a__ ( self: Union[str, Any] , __a: dict )-> Tuple:
lowerCamelCase : Any = self.tokenizer.pad_token_id
lowerCamelCase , lowerCamelCase : Optional[int] = batch["""input_ids"""], batch["""attention_mask"""]
lowerCamelCase : Optional[Any] = batch["""labels"""]
if isinstance(self.model , __a ):
lowerCamelCase : Optional[int] = self.model._shift_right(__a )
else:
lowerCamelCase : List[str] = shift_tokens_right(__a , __a )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowerCamelCase : Tuple = decoder_input_ids
self.save_readable_batch(__a )
lowerCamelCase : Optional[Any] = self(__a , attention_mask=__a , decoder_input_ids=__a , use_cache=__a )
lowerCamelCase : Union[str, Any] = outputs["""logits"""]
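# without label smoothing, fall back to plain token-level cross-entropy that ignores padded positions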
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowerCamelCase : int = nn.CrossEntropyLoss(ignore_index=__a )
assert lm_logits.shape[-1] == self.vocab_size
lowerCamelCase : Optional[int] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
lowerCamelCase : Union[str, Any] = nn.functional.log_softmax(__a , dim=-1 )
lowerCamelCase , lowerCamelCase : List[Any] = label_smoothed_nll_loss(
__a , __a , self.hparams.label_smoothing , ignore_index=__a )
return (loss,)
@property
def a__ ( self: List[str] )-> int:
return self.tokenizer.pad_token_id
def a__ ( self: str , __a: Optional[Any] , __a: Dict )-> Dict:
lowerCamelCase : Union[str, Any] = self._step(__a )
lowerCamelCase : Optional[int] = dict(zip(self.loss_names , __a ) )
# tokens per batch
lowerCamelCase : int = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
lowerCamelCase : int = batch["""input_ids"""].shape[0]
lowerCamelCase : int = batch["""input_ids"""].eq(self.pad ).sum()
lowerCamelCase : List[str] = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def a__ ( self: int , __a: List[str] , __a: Any )-> Dict:
return self._generative_step(__a )
def a__ ( self: Any , __a: Any , __a: Any="val" )-> Dict:
self.step_count += 1
lowerCamelCase : Dict = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
lowerCamelCase : str = losses["""loss"""]
lowerCamelCase : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
lowerCamelCase : Optional[int] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowerCamelCase : torch.FloatTensor = torch.tensor(__a ).type_as(__a )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__a )
lowerCamelCase : str = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
lowerCamelCase : int = self.step_count
self.metrics[prefix].append(__a ) # callback writes this to self.metrics_save_path
lowerCamelCase : Union[str, Any] = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'{prefix}_loss': loss,
f'{prefix}_{self.val_metric}': metric_tensor,
}
def a__ ( self: List[str] , __a: List[Any] , __a: Union[str, Any] )-> Dict:
return calculate_rouge(__a , __a )
def a__ ( self: Union[str, Any] , __a: dict )-> dict:
lowerCamelCase : Tuple = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
lowerCamelCase : Any = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=__a , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
lowerCamelCase : str = (time.time() - ta) / batch["""input_ids"""].shape[0]
lowerCamelCase : List[str] = self.ids_to_clean_text(__a )
lowerCamelCase : List[str] = self.ids_to_clean_text(batch["""labels"""] )
lowerCamelCase : str = self._step(__a )
lowerCamelCase : Union[str, Any] = dict(zip(self.loss_names , __a ) )
lowerCamelCase : Dict = self.calc_generative_metrics(__a , __a )
lowerCamelCase : List[str] = np.mean(lmap(__a , __a ) )
base_metrics.update(gen_time=__a , gen_len=__a , preds=__a , target=__a , **__a )
return base_metrics
def a__ ( self: List[str] , __a: Union[str, Any] , __a: Dict )-> Any:
return self._generative_step(__a )
def a__ ( self: Any , __a: Optional[int] )-> List[str]:
return self.validation_epoch_end(__a , prefix="""test""" )
def a__ ( self: str , __a: int )-> SeqaSeqDataset:
lowerCamelCase : Union[str, Any] = self.n_obs[type_path]
lowerCamelCase : int = self.target_lens[type_path]
lowerCamelCase : Union[str, Any] = self.dataset_class(
self.tokenizer , type_path=__a , n_obs=__a , max_target_length=__a , **self.dataset_kwargs , )
return dataset
def a__ ( self: Optional[int] , __a: str , __a: int , __a: bool = False )-> DataLoader:
lowerCamelCase : List[str] = self.get_dataset(__a )
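# sortish sampling groups similar-length examples into batches to cut padding waste while keeping some shuffling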
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowerCamelCase : Union[str, Any] = dataset.make_sortish_sampler(__a , distributed=self.hparams.gpus > 1 )
return DataLoader(
__a , batch_size=__a , collate_fn=dataset.collate_fn , shuffle=__a , num_workers=self.num_workers , sampler=__a , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowerCamelCase : List[Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
__a , batch_sampler=__a , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__a , batch_size=__a , collate_fn=dataset.collate_fn , shuffle=__a , num_workers=self.num_workers , sampler=__a , )
def a__ ( self: Optional[int] )-> DataLoader:
lowerCamelCase : Tuple = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=__a )
return dataloader
def a__ ( self: List[str] )-> DataLoader:
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def a__ ( self: Dict )-> DataLoader:
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def a__ ( __a: str , __a: Optional[int] )-> Dict:
BaseTransformer.add_model_specific_args(__a , __a )
add_generic_args(__a , __a )
parser.add_argument(
"""--max_source_length""" , default=1_024 , type=__a , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=__a , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=142 , type=__a , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=142 , type=__a , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=__a )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=__a )
parser.add_argument("""--max_tokens_per_batch""" , type=__a , default=__a )
parser.add_argument("""--logger_name""" , type=__a , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=__a , default=-1 , required=__a , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=__a , default=500 , required=__a , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=__a , default=-1 , required=__a , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=__a , default="""summarization""" , required=__a , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=__a , default=0.0 , required=__a )
parser.add_argument("""--src_lang""" , type=__a , default="""""" , required=__a )
parser.add_argument("""--tgt_lang""" , type=__a , default="""""" , required=__a )
parser.add_argument("""--eval_beams""" , type=__a , default=__a , required=__a )
parser.add_argument(
"""--val_metric""" , type=__a , default=__a , required=__a , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=__a , default=__a , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=__a , default=1 , required=__a , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=__a , default=-1 , required=__a , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] ='''translation'''
snake_case__ : Dict =['''loss''']
snake_case__ : Any =['''bleu''']
snake_case__ : List[str] ='''bleu'''
def __init__( self: Dict , __a: Union[str, Any] , **__a: Any )-> Union[str, Any]:
super().__init__(__a , **__a )
lowerCamelCase : List[str] = hparams.src_lang
lowerCamelCase : List[Any] = hparams.tgt_lang
def a__ ( self: List[str] , __a: Dict , __a: Dict )-> dict:
return calculate_bleu(__a , __a )
def snake_case ( UpperCamelCase__ : List[str] , UpperCamelCase__ : str=None ) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=UpperCamelCase__ )
check_output_dir(UpperCamelCase__ , expected_items=3 )
if model is None:
if "summarization" in args.task:
lowerCamelCase : SummarizationModule = SummarizationModule(UpperCamelCase__ )
else:
lowerCamelCase : SummarizationModule = TranslationModule(UpperCamelCase__ )
lowerCamelCase : str = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
lowerCamelCase : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
lowerCamelCase : Dict = os.environ.get("""WANDB_PROJECT""" , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = WandbLogger(name=model.output_dir.name , project=UpperCamelCase__ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
lowerCamelCase : Tuple = WandbLogger(name=model.output_dir.name , project=F'hf_{dataset}' )
if args.early_stopping_patience >= 0:
lowerCamelCase : str = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
lowerCamelCase : int = False
lowerCamelCase : Tuple = args.val_metric == """loss"""
lowerCamelCase : pl.Trainer = generic_train(
UpperCamelCase__ , UpperCamelCase__ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , UpperCamelCase__ ) , early_stopping_callback=UpperCamelCase__ , logger=UpperCamelCase__ , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
lowerCamelCase : Optional[int] = """"""
lowerCamelCase : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=UpperCamelCase__ ) )
if checkpoints:
lowerCamelCase : List[Any] = checkpoints[-1]
lowerCamelCase : str = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
__lowerCamelCase :int = pl.Trainer.add_argparse_args(parser)
__lowerCamelCase :Optional[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__lowerCamelCase :Any = parser.parse_args()
main(args)
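# example launch (paths are hypothetical; generic flags such as --model_name_or_path and --do_predict
# come from add_generic_args / BaseTransformer, which are defined outside this file):
# python finetune.py --model_name_or_path t5-small --data_dir ./cnn_dm --output_dir ./out --n_val 100 --do_predict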
| 42
|
"""simple docstring"""
import os
def snake_case ( ) -> Optional[Any]:
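# Project Euler 11: largest product of four adjacent grid numbers (right, down, and both diagonals)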
with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
lowerCamelCase : int = [] # noqa: E741
for _ in range(20 ):
l.append([int(x ) for x in f.readline().split()] )
lowerCamelCase : Union[str, Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase : Optional[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase : List[str] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase : List[Any] = temp
return maximum
if __name__ == "__main__":
print(solution())
| 42
| 1
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__lowerCamelCase :List[Any] = random.Random()
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=1.0 , UpperCamelCase__ : int=None , UpperCamelCase__ : Tuple=None ) -> Any:
if rng is None:
lowerCamelCase : Tuple = global_rng
lowerCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
def __init__( self: Optional[int] , __a: str , __a: Union[str, Any]=7 , __a: int=400 , __a: Union[str, Any]=2_000 , __a: int=1 , __a: Union[str, Any]=0.0 , __a: Union[str, Any]=16_000 , __a: Optional[int]=True , __a: Optional[int]=80 , __a: Union[str, Any]=16 , __a: Union[str, Any]=64 , __a: Any="hann_window" , __a: Any=80 , __a: Union[str, Any]=7_600 , __a: Dict=1e-1_0 , __a: List[str]=True , )-> Dict:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : int = batch_size
lowerCamelCase : List[Any] = min_seq_length
lowerCamelCase : Dict = max_seq_length
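# length step so generated inputs grow evenly from min_seq_length to max_seq_length across the batch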
lowerCamelCase : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase : List[str] = feature_size
lowerCamelCase : Dict = padding_value
lowerCamelCase : Dict = sampling_rate
lowerCamelCase : Any = do_normalize
lowerCamelCase : List[Any] = num_mel_bins
lowerCamelCase : Optional[int] = hop_length
lowerCamelCase : str = win_length
lowerCamelCase : str = win_function
lowerCamelCase : str = fmin
lowerCamelCase : Optional[Any] = fmax
lowerCamelCase : str = mel_floor
lowerCamelCase : Any = return_attention_mask
def a__ ( self: str )-> Any:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def a__ ( self: int , __a: Optional[Any]=False , __a: Tuple=False )-> Dict:
def _flatten(__a: Any ):
return list(itertools.chain(*__a ) )
if equal_length:
lowerCamelCase : Optional[int] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCamelCase : int = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase : Dict = [np.asarray(__a ) for x in speech_inputs]
return speech_inputs
def a__ ( self: Dict , __a: Tuple=False , __a: int=False )-> Union[str, Any]:
if equal_length:
lowerCamelCase : Any = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase : Dict = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase : int = [np.asarray(__a ) for x in speech_inputs]
return speech_inputs
@require_torch
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[int] =SpeechTaFeatureExtractor
def a__ ( self: Dict )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = SpeechTaFeatureExtractionTester(self )
def a__ ( self: Tuple , __a: Dict )-> Optional[Any]:
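# normalized features should have mean ~0 and variance ~1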
self.assertTrue(np.all(np.mean(__a , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__a , axis=0 ) - 1 ) < 1e-3 ) )
def a__ ( self: Optional[Any] )-> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase : Any = [np.asarray(__a ) for speech_input in speech_inputs]
# Test not batched input
lowerCamelCase : Any = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
lowerCamelCase : int = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
# Test batched
lowerCamelCase : Dict = feat_extract(__a , return_tensors="""np""" ).input_values
lowerCamelCase : Any = feat_extract(__a , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
def a__ ( self: Optional[int] )-> Any:
lowerCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase : List[str] = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase : List[str] = [None, 1_600, None]
for max_length, padding in zip(__a , __a ):
lowerCamelCase : Optional[int] = feat_extract(__a , padding=__a , max_length=__a , return_tensors="""np""" )
lowerCamelCase : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[1][1_000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def a__ ( self: Any )-> Tuple:
lowerCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase : List[Any] = range(800 , 1_400 , 200 )
lowerCamelCase : int = [floats_list((1, x) )[0] for x in lengths]
lowerCamelCase : Optional[Any] = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase : List[Any] = [None, 1_600, None]
for max_length, padding in zip(__a , __a ):
lowerCamelCase : List[str] = feat_extract(__a , max_length=__a , padding=__a )
lowerCamelCase : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def a__ ( self: str )-> str:
lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase : List[Any] = feat_extract(
__a , truncation=__a , max_length=1_000 , padding="""max_length""" , return_tensors="""np""" )
lowerCamelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def a__ ( self: Optional[int] )-> Optional[Any]:
lowerCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase : Any = feat_extract(
__a , truncation=__a , max_length=1_000 , padding="""longest""" , return_tensors="""np""" )
lowerCamelCase : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
lowerCamelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase : Optional[int] = feat_extract(
__a , truncation=__a , max_length=2_000 , padding="""longest""" , return_tensors="""np""" )
lowerCamelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase : Tuple = np.random.rand(100 ).astype(np.floataa )
lowerCamelCase : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase : Optional[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCamelCase : Union[str, Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def a__ ( self: int )-> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase : Union[str, Any] = [np.asarray(__a ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase : Dict = feature_extractor(audio_target=__a , padding=__a , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowerCamelCase : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
lowerCamelCase : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
# Test batched
lowerCamelCase : List[str] = feature_extractor(__a , return_tensors="""np""" ).input_values
lowerCamelCase : Any = feature_extractor(__a , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase : Optional[int] = np.asarray(__a )
lowerCamelCase : Optional[Any] = feature_extractor(__a , return_tensors="""np""" ).input_values
lowerCamelCase : Any = feature_extractor(__a , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase : Any = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase : str = feat_extract.model_input_names[0]
lowerCamelCase : Any = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__a ) == len(__a ) for x, y in zip(__a , processed_features[input_name] ) ) )
lowerCamelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__a )
lowerCamelCase : Tuple = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
lowerCamelCase : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def a__ ( self: Any )-> Union[str, Any]:
lowerCamelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__a )
lowerCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase : Tuple = feat_extract.model_input_names[0]
lowerCamelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
lowerCamelCase : int = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase : str = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase : Union[str, Any] = feat_extract.model_input_names[0]
lowerCamelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} )
lowerCamelCase : List[Any] = feat_extract.num_mel_bins # hack!
lowerCamelCase : Optional[Any] = feat_extract.pad(__a , padding="""longest""" , return_tensors="""np""" )[input_name]
lowerCamelCase : str = feat_extract.pad(__a , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def a__ ( self: str )-> int:
lowerCamelCase : List[Any] = self.feat_extract_dict
lowerCamelCase : List[str] = True
lowerCamelCase : Dict = self.feature_extraction_class(**__a )
lowerCamelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase : Tuple = [len(__a ) for x in speech_inputs]
lowerCamelCase : Tuple = feat_extract.model_input_names[0]
lowerCamelCase : Tuple = BatchFeature({input_name: speech_inputs} )
lowerCamelCase : Optional[int] = feat_extract.num_mel_bins # hack!
lowerCamelCase : List[Any] = feat_extract.pad(__a , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __a )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __a )
def a__ ( self: int )-> str:
lowerCamelCase : List[Any] = self.feat_extract_dict
lowerCamelCase : Dict = True
lowerCamelCase : List[str] = self.feature_extraction_class(**__a )
lowerCamelCase : Any = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase : List[Any] = [len(__a ) for x in speech_inputs]
lowerCamelCase : int = feat_extract.model_input_names[0]
lowerCamelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} )
lowerCamelCase : Tuple = min(__a )
lowerCamelCase : List[str] = feat_extract.num_mel_bins # hack!
lowerCamelCase : Any = feat_extract.pad(
__a , padding="""max_length""" , max_length=__a , truncation=__a , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def a__ ( self: int , __a: str )-> List[Any]:
from datasets import load_dataset
lowerCamelCase : Dict = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
lowerCamelCase : Optional[int] = ds.sort("""id""" ).select(range(__a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self: int )-> Dict:
# fmt: off
lowerCamelCase : str = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
lowerCamelCase : Tuple = self._load_datasamples(1 )
lowerCamelCase : Optional[int] = SpeechTaFeatureExtractor()
lowerCamelCase : int = feature_extractor(__a , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , __a , atol=1e-6 ) )
def a__ ( self: List[str] )-> List[str]:
# fmt: off
lowerCamelCase : List[Any] = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
lowerCamelCase : List[str] = self._load_datasamples(1 )
lowerCamelCase : str = SpeechTaFeatureExtractor()
lowerCamelCase : Dict = feature_extractor(audio_target=__a , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , __a , atol=1e-4 ) )
| 42
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
snake_case__ : Any =False
snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ ( cls: Dict )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Union[str, Any] )-> Any:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: Tuple )-> Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCamelCase : Optional[int] = CLIPTextModel(__a )
lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Tuple = torch.manual_seed(__a )
else:
lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
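# token_indices marks the prompt tokens whose cross-attention the pipeline boosts; thresholds and max_iter_to_alter bound those latent updates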
lowerCamelCase : Dict = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def a__ ( self: Dict )-> str:
lowerCamelCase : Tuple = """cpu"""
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : List[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Any = self.get_dummy_inputs(__a )
lowerCamelCase : Union[str, Any] = pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase : Optional[Any] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def a__ ( self: int )-> Optional[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def a__ ( self: Union[str, Any] )-> Optional[int]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self: Tuple )-> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def a__ ( self: Dict )-> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def a__ ( self: Optional[int] )-> Dict:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def a__ ( self: Any )-> Tuple:
super().test_save_load_local(expected_max_difference=5e-4 )
def a__ ( self: str )-> str:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
@classmethod
def a__ ( cls: Any )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Dict )-> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: int )-> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = torch.manual_seed(51 )
lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowerCamelCase : Dict = """a painting of an elephant with glasses"""
lowerCamelCase : Any = [5, 7]
lowerCamelCase : Tuple = pipe(
prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowerCamelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 42
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Union[str, Any] )-> Any:
lowerCamelCase : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__a , """num_attention_heads""" ) )
self.parent.assertTrue(hasattr(__a , """num_encoder_blocks""" ) )
class A__ :
"""simple docstring"""
def __init__( self: int , __a: str , __a: Optional[Any]=13 , __a: Optional[int]=64 , __a: int=3 , __a: Union[str, Any]=4 , __a: Union[str, Any]=[2, 2, 2, 2] , __a: Tuple=[8, 4, 2, 1] , __a: Dict=[16, 32, 64, 128] , __a: int=[1, 4, 8, 16] , __a: Optional[Any]=[1, 2, 4, 8] , __a: int=True , __a: Optional[int]=True , __a: Tuple="gelu" , __a: int=0.1 , __a: List[Any]=0.1 , __a: Optional[int]=0.02 , __a: Optional[int]=3 , __a: str=None , )-> Optional[Any]:
lowerCamelCase : Any = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : List[str] = image_size
lowerCamelCase : str = num_channels
lowerCamelCase : str = num_encoder_blocks
lowerCamelCase : Any = sr_ratios
lowerCamelCase : List[Any] = depths
lowerCamelCase : Any = hidden_sizes
lowerCamelCase : Tuple = downsampling_rates
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : Tuple = is_training
lowerCamelCase : Tuple = use_labels
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[Any] = hidden_dropout_prob
lowerCamelCase : Tuple = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Optional[int] = num_labels
lowerCamelCase : Tuple = scope
def a__ ( self: Tuple )-> int:
lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : int = None
if self.use_labels:
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def a__ ( self: Optional[int] )-> str:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a__ ( self: List[str] , __a: str , __a: Dict , __a: Union[str, Any] )-> Tuple:
lowerCamelCase : Union[str, Any] = SegformerModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
lowerCamelCase : Tuple = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def a__ ( self: str , __a: Dict , __a: Tuple , __a: str )-> Optional[Any]:
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : List[Any] = SegformerForSemanticSegmentation(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def a__ ( self: Union[str, Any] , __a: Dict , __a: Any , __a: Dict )-> Optional[Any]:
lowerCamelCase : Optional[Any] = 1
lowerCamelCase : Tuple = SegformerForSemanticSegmentation(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__a )
lowerCamelCase : int = model(__a , labels=__a )
self.parent.assertGreater(result.loss , 0.0 )
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase : List[str] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = config_and_inputs
lowerCamelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =(
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case__ : Optional[int] =True
snake_case__ : Optional[Any] =False
snake_case__ : Dict =False
snake_case__ : List[str] =False
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : Optional[Any] = SegformerModelTester(self )
lowerCamelCase : Optional[int] = SegformerConfigTester(self , config_class=__a )
def a__ ( self: Tuple )-> List[str]:
self.config_tester.run_common_tests()
def a__ ( self: List[Any] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Dict )-> int:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__a )
def a__ ( self: Dict )-> List[Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__a )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def a__ ( self: Union[str, Any] )-> List[Any]:
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def a__ ( self: Tuple )-> int:
pass
def a__ ( self: int )-> int:
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = True
for model_class in self.all_model_classes:
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : List[str] = False
lowerCamelCase : Tuple = True
lowerCamelCase : int = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[Any] = outputs.attentions
lowerCamelCase : Dict = sum(self.model_tester.depths )
self.assertEqual(len(__a ) , __a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase : str = True
lowerCamelCase : int = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : str = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.attentions
self.assertEqual(len(__a ) , __a )
# verify the first attentions (first block, first layer)
lowerCamelCase : int = (self.model_tester.image_size // 4) ** 2
lowerCamelCase : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
lowerCamelCase : List[Any] = (self.model_tester.image_size // 32) ** 2
lowerCamelCase : Any = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
lowerCamelCase : Optional[int] = len(__a )
# Check attention is always last and order is fine
lowerCamelCase : Any = True
lowerCamelCase : int = True
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Any = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 1 , len(__a ) )
lowerCamelCase : List[Any] = outputs.attentions
self.assertEqual(len(__a ) , __a )
# verify the first attentions (first block, first layer)
lowerCamelCase : Dict = (self.model_tester.image_size // 4) ** 2
lowerCamelCase : Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def a__ ( self: Optional[int] )-> str:
def check_hidden_states_output(__a: Dict , __a: List[Any] , __a: Any ):
lowerCamelCase : Any = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : Tuple = outputs.hidden_states
lowerCamelCase : List[str] = self.model_tester.num_encoder_blocks
self.assertEqual(len(__a ) , __a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : int = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Dict = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> int:
if not self.model_tester.is_training:
return
lowerCamelCase , lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = True
for model_class in self.all_model_classes:
if model_class in get_values(__a ):
continue
lowerCamelCase : Optional[int] = model_class(__a )
model.to(__a )
model.train()
lowerCamelCase : List[Any] = self._prepare_for_class(__a , __a , return_labels=__a )
lowerCamelCase : str = model(**__a ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: Optional[Any] )-> Tuple:
pass
@slow
def a__ ( self: Any )-> Tuple:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = SegformerModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> int:
lowerCamelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@slow
def a__ ( self: List[Any] )-> Any:
# only resize + normalize
lowerCamelCase : Tuple = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__a , align=__a , do_random_crop=__a )
lowerCamelCase : Union[str, Any] = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
__a )
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : List[str] = image_processor(images=__a , return_tensors="""pt""" )
lowerCamelCase : Any = encoded_inputs.pixel_values.to(__a )
with torch.no_grad():
lowerCamelCase : Dict = model(__a )
lowerCamelCase : Any = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : List[str] = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __a , atol=1e-4 ) )
@slow
def a__ ( self: Any )-> str:
# only resize + normalize
lowerCamelCase : List[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__a , align=__a , do_random_crop=__a )
lowerCamelCase : List[Any] = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(__a )
lowerCamelCase : Optional[int] = prepare_img()
lowerCamelCase : str = image_processor(images=__a , return_tensors="""pt""" )
lowerCamelCase : int = encoded_inputs.pixel_values.to(__a )
with torch.no_grad():
lowerCamelCase : Tuple = model(__a )
lowerCamelCase : Optional[int] = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : List[Any] = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __a , atol=1e-1 ) )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
# only resize + normalize
lowerCamelCase : Dict = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__a , align=__a , do_random_crop=__a )
lowerCamelCase : Union[str, Any] = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
__a )
lowerCamelCase : Optional[Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" )
lowerCamelCase : Optional[int] = encoded_inputs.pixel_values.to(__a )
with torch.no_grad():
lowerCamelCase : List[Any] = model(__a )
lowerCamelCase : Dict = outputs.logits.detach().cpu()
lowerCamelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=__a , target_sizes=[(500, 300)] )
lowerCamelCase : Union[str, Any] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __a )
lowerCamelCase : Any = image_processor.post_process_semantic_segmentation(outputs=__a )
lowerCamelCase : List[str] = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , __a )
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
"""simple docstring"""
def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : Any = seq_length
lowerCamelCase : Any = is_training
lowerCamelCase : Tuple = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Union[str, Any] = hidden_dropout_prob
lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : List[Any] = type_sequence_label_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Union[str, Any] = num_labels
lowerCamelCase : Optional[Any] = num_choices
lowerCamelCase : Any = scope
def a__ ( self: Optional[int] )-> List[Any]:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Dict = None
if self.use_input_mask:
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Any = None
lowerCamelCase : int = None
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Tuple )-> Union[str, Any]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
lowerCamelCase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a )
lowerCamelCase : str = model(__a )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
lowerCamelCase : str = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Any = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =False
snake_case__ : Dict =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =()
snake_case__ : Optional[int] =(
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any =True
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Optional[Any] = EsmModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: List[Any] )-> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Any )-> List[Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> List[str]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Any = EsmEmbeddings(config=__a )
lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
lowerCamelCase : List[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Any )-> Optional[Any]:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Dict )-> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: List[str] )-> Dict:
pass
@require_torch
class A__ ( __lowercase):
"""simple docstring"""
@slow
def a__ ( self: Any )-> Union[str, Any]:
with torch.no_grad():
lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Tuple = model(__a )[0]
lowerCamelCase : Dict = 33
lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
lowerCamelCase : Tuple = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def a__ ( self: Dict )-> str:
with torch.no_grad():
lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase : Any = model(__a )[0]
# compare the actual values for a slice.
lowerCamelCase : Tuple = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: str )-> str:
lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
lowerCamelCase : Optional[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase : str = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"""image_std""": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
"""do_convert_rgb""": True,
}
lowerCamelCase : Tuple = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__a , __a )
def a__ ( self: int , **__a: str )-> Dict:
return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
def a__ ( self: Any , **__a: str )-> Optional[int]:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def a__ ( self: Tuple , **__a: List[Any] )-> Union[str, Any]:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )
def a__ ( self: int )-> Any:
shutil.rmtree(self.tmpdirname )
def a__ ( self: Any )-> Dict:
lowerCamelCase : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCamelCase : List[Any] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self: Dict )-> Tuple:
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : Optional[Any] = self.get_image_processor()
lowerCamelCase : Any = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
lowerCamelCase : Tuple = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase : List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __a )
self.assertIsInstance(processor_fast.tokenizer , __a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __a )
self.assertIsInstance(processor_fast.image_processor , __a )
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : Union[str, Any] = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
lowerCamelCase : int = self.get_image_processor(do_normalize=__a )
lowerCamelCase : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=__a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def a__ ( self: List[Any] )-> List[str]:
lowerCamelCase : Optional[int] = self.get_image_processor()
lowerCamelCase : Union[str, Any] = self.get_tokenizer()
lowerCamelCase : Any = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase : Optional[int] = image_processor(__a , return_tensors="""np""" )
lowerCamelCase : List[Any] = processor(images=__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a__ ( self: Union[str, Any] )-> Optional[Any]:
lowerCamelCase : Dict = self.get_image_processor()
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : Dict = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase : Tuple = """Alexandra,T-shirt的价格是15便士。"""
lowerCamelCase : Tuple = processor(text=__a )
lowerCamelCase : str = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a__ ( self: Optional[int] )-> Optional[Any]:
lowerCamelCase : List[Any] = self.get_image_processor()
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase : Tuple = """Alexandra,T-shirt的价格是15便士。"""
lowerCamelCase : List[Any] = self.prepare_image_inputs()
lowerCamelCase : int = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def a__ ( self: Tuple )-> str:
lowerCamelCase : Dict = self.get_image_processor()
lowerCamelCase : int = self.get_tokenizer()
lowerCamelCase : str = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : List[Any] = processor.batch_decode(__a )
lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Union[str, Any] )-> Union[str, Any]:
lowerCamelCase : int = self.get_image_processor()
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : List[str] = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase : List[str] = """Alexandra,T-shirt的价格是15便士。"""
lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase : Dict = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AlbertTokenizer
snake_case__ : Optional[Any] =AlbertTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : Any =True
snake_case__ : Optional[int] =True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Tuple )-> List[Any]:
lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
lowerCamelCase : str = AlbertTokenizer(__a )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
"""simple docstring"""
import os
import time

import numpy as np
import onnxruntime as ort

# The assignment targets for these three flags were lost in extraction; restoring
# them as ONNX Runtime TensorRT environment toggles is an assumption.
os.environ['ORT_TENSORRT_INT8_ENABLE'] = '1'
os.environ['ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE'] = '0'
os.environ['ORT_TENSORRT_ENGINE_CACHE_ENABLE'] = '1'

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print('Warm up phase...')
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print('Start inference...')
start_time = time.time()
max_iters = 2000
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
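# The benchmark assumes a `model.onnx` file already exists next to the script. A
# minimal, hypothetical way to produce one from a BERT-style checkpoint (the model
# id, input shapes and opset below are illustrative, not part of the original script):
#
#     import torch
#     from transformers import BertModel
#
#     model = BertModel.from_pretrained('bert-base-uncased').eval()
#     dummy = torch.ones(1, 128, dtype=torch.long)
#     torch.onnx.export(
#         model,
#         (dummy, dummy, dummy),
#         'model.onnx',
#         input_names=['input_ids', 'attention_mask', 'token_type_ids'],
#         opset_version=13,
#     )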
"""simple docstring"""
from __future__ import annotations

test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # First DFS pass of Kosaraju's algorithm: record vertices in finish order.
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # Second DFS pass: everything reachable from `vert` in the reversed graph
    # belongs to the same strongly connected component.
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    # Kosaraju's algorithm. The helper names `topology_sort` and `find_components`
    # are fixed by the recursive calls in the original; this top-level name and the
    # two test-graph names are reconstructions.
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
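

# Quick check of the reconstruction above: each cycle should come back as one
# component (the exact ordering depends on the DFS visit order).
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]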
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__lowerCamelCase :List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__ ( __lowercase):
"""simple docstring"""
def __init__( self: List[Any] , __a: AutoencoderKL , __a: CLIPTextModel , __a: CLIPTokenizer , __a: UNetaDConditionModel , __a: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a: StableDiffusionSafetyChecker , __a: CLIPImageProcessor , )-> List[str]:
super().__init__()
self.register_modules(
vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , )
def a__ ( self: Any , __a: Optional[Union[str, int]] = "auto" )-> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def a__ ( self: Optional[Any] )-> int:
self.enable_attention_slicing(__a )
@torch.no_grad()
def __call__( self: List[str] , __a: Union[str, List[str]] , __a: int = 512 , __a: int = 512 , __a: int = 50 , __a: float = 7.5 , __a: Optional[Union[str, List[str]]] = None , __a: Optional[int] = 1 , __a: float = 0.0 , __a: Optional[torch.Generator] = None , __a: Optional[torch.FloatTensor] = None , __a: Optional[str] = "pil" , __a: bool = True , __a: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a: int = 1 , __a: Optional[torch.FloatTensor] = None , **__a: Dict , )-> List[str]:
if isinstance(__a , __a ):
lowerCamelCase : Union[str, Any] = 1
elif isinstance(__a , __a ):
lowerCamelCase : Tuple = len(__a )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(__a )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__a , __a ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(__a )}.' )
# get prompt text embeddings
lowerCamelCase : Tuple = self.tokenizer(
__a , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowerCamelCase : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase : Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
lowerCamelCase : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase : Any = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = text_embeddings.shape
lowerCamelCase : List[Any] = text_embeddings.repeat(1 , __a , 1 )
lowerCamelCase : Optional[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase : List[str]
if negative_prompt is None:
lowerCamelCase : Optional[int] = [""""""]
elif type(__a ) is not type(__a ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(__a )} !='
f' {type(__a )}.' )
elif isinstance(__a , __a ):
lowerCamelCase : Dict = [negative_prompt]
elif batch_size != len(__a ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(__a )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
lowerCamelCase : Union[str, Any] = negative_prompt
lowerCamelCase : Any = text_input_ids.shape[-1]
lowerCamelCase : List[str] = self.tokenizer(
__a , padding="""max_length""" , max_length=__a , truncation=__a , return_tensors="""pt""" , )
lowerCamelCase : int = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase : List[Any] = uncond_embeddings.shape[1]
lowerCamelCase : Any = uncond_embeddings.repeat(__a , __a , 1 )
lowerCamelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , __a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase : int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase : Union[str, Any] = torch.randn(
__a , generator=__a , device="""cpu""" , dtype=__a ).to(self.device )
lowerCamelCase : Optional[int] = torch.randn(__a , generator=__a , device="""cpu""" , dtype=__a ).to(
self.device )
else:
lowerCamelCase : int = torch.randn(
__a , generator=__a , device=self.device , dtype=__a )
lowerCamelCase : Union[str, Any] = torch.randn(__a , generator=__a , device=self.device , dtype=__a )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
lowerCamelCase : Union[str, Any] = latents_reference.to(self.device )
lowerCamelCase : Optional[Any] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase : Tuple = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase : int = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase : Tuple = 0 if dx < 0 else dx
lowerCamelCase : Any = 0 if dy < 0 else dy
lowerCamelCase : Optional[Any] = max(-dx , 0 )
lowerCamelCase : Tuple = max(-dy , 0 )
# import pdb
# pdb.set_trace()
lowerCamelCase : Tuple = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase : List[str] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase : int = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase : List[str] = {}
if accepts_eta:
lowerCamelCase : Optional[Any] = eta
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase : Union[str, Any] = self.scheduler.scale_model_input(__a , __a )
# predict the noise residual
lowerCamelCase : Union[str, Any] = self.unet(__a , __a , encoder_hidden_states=__a ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase , lowerCamelCase : str = noise_pred.chunk(2 )
lowerCamelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase : str = self.scheduler.step(__a , __a , __a , **__a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__a , __a , __a )
lowerCamelCase : str = 1 / 0.1_82_15 * latents
lowerCamelCase : Optional[Any] = self.vae.decode(__a ).sample
lowerCamelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase : int = self.feature_extractor(self.numpy_to_pil(__a ) , return_tensors="""pt""" ).to(
self.device )
lowerCamelCase , lowerCamelCase : Optional[int] = self.safety_checker(
images=__a , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase : Dict = None
if output_type == "pil":
lowerCamelCase : Tuple = self.numpy_to_pil(__a )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__a , nsfw_content_detected=__a )
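# Usage sketch, assuming this class is loaded as a diffusers community pipeline
# (the local path and model id below are illustrative):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4",
#         custom_pipeline="./seed_resize_stable_diffusion.py",
#     ).to("cuda")
#     image = pipe("a photo of an astronaut riding a horse", height=512, width=512).images[0]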
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] ='''time_series_transformer'''
snake_case__ : List[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
# time series specific configuration
lowerCamelCase : str = prediction_length
lowerCamelCase : Optional[Any] = context_length or prediction_length
lowerCamelCase : Tuple = distribution_output
lowerCamelCase : Any = loss
lowerCamelCase : List[Any] = input_size
lowerCamelCase : int = num_time_features
lowerCamelCase : Dict = lags_sequence
lowerCamelCase : Optional[int] = scaling
lowerCamelCase : int = num_dynamic_real_features
lowerCamelCase : Tuple = num_static_real_features
lowerCamelCase : Any = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : int = cardinality
else:
lowerCamelCase : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = embedding_dimension
else:
lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features
lowerCamelCase : List[str] = d_model
lowerCamelCase : Tuple = encoder_attention_heads
lowerCamelCase : Optional[int] = decoder_attention_heads
lowerCamelCase : Union[str, Any] = encoder_ffn_dim
lowerCamelCase : str = decoder_ffn_dim
lowerCamelCase : str = encoder_layers
lowerCamelCase : Any = decoder_layers
lowerCamelCase : Optional[int] = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Tuple = activation_dropout
lowerCamelCase : Optional[int] = encoder_layerdrop
lowerCamelCase : int = decoder_layerdrop
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : Optional[Any] = init_std
lowerCamelCase : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=__a , **__a )
@property
def a__ ( self: int )-> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
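# Usage sketch (an assumption: the obfuscated class above corresponds to
# transformers' TimeSeriesTransformerConfig). With the defaults shown
# (input_size=1, a 7-element lags_sequence, no extra static/dynamic features),
# the derived model input width works out to
#     feature_size = input_size * len(lags_sequence) + _number_of_features
#                  = 1 * 7 + (0 + 0 + 0 + 0 + 1 * 2) = 9
#
#     from transformers import TimeSeriesTransformerConfig
#     config = TimeSeriesTransformerConfig(prediction_length=24)
#     assert config.feature_size == 9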
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowercase)
class A__ ( __lowercase):
"""simple docstring"""
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
snake_case__ : str =field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True})
snake_case__ : ClassVar[Features] =Features({'''text''': Value('''string''')})
snake_case__ : ClassVar[Features] =Features({'''labels''': ClassLabel})
snake_case__ : str ="text"
snake_case__ : str ="labels"
def a__ ( self: Optional[int] , __a: Optional[int] )-> Dict:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __a ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
lowerCamelCase : Dict = copy.deepcopy(self )
lowerCamelCase : str = self.label_schema.copy()
lowerCamelCase : Tuple = features[self.label_column]
lowerCamelCase : int = label_schema
return task_template
@property
def a__ ( self: List[str] )-> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
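# Usage sketch (an assumption: this frozen dataclass maps to datasets'
# TextClassification task template, and the method above is
# `align_with_features`, which copies the dataset's ClassLabel feature into
# the template's label schema):
#
#     from datasets import ClassLabel, Features, Value
#     features = Features({"text": Value("string"),
#                          "labels": ClassLabel(names=["neg", "pos"])})
#     aligned = TextClassification().align_with_features(features)
#     # aligned.label_schema["labels"].names == ["neg", "pos"]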
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def snake_case(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
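# Worked example (a sketch): least-significant-digit radix sort with
# RADIX = 10 buckets, one pass per decimal digit, for non-negative integers:
#
#     snake_case([170, 45, 75, 90, 802, 24, 2, 66])
#     # pass 1 buckets by ones, pass 2 by tens, pass 3 by hundreds
#     # -> [2, 24, 45, 66, 75, 90, 170, 802]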
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def snake_case(graph: dict[int, list[int]]) -> list[list[int]]:
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
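# Usage sketch: snake_case() above implements Kosaraju's algorithm (DFS
# finishing order on the graph, then DFS over the reversed graph). For
# test_graph_2 the strongly connected components are the two cycles
# {0, 1, 2} and {3, 4, 5}:
#
#     snake_case(test_graph_2)
#     # -> [[0, 2, 1], [3, 5, 4]]  (member order reflects DFS visit order)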
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'{position_embeddings[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the trax model pickle (.pkl) file.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase :Optional[int] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
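# Example invocation (a sketch; paths are placeholders and the script name is
# whatever this file is saved as):
#
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path /path/to/trax_weights.pkl \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin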
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AlbertTokenizer
snake_case__ : Optional[Any] =AlbertTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : Any =True
snake_case__ : Optional[int] =True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Tuple )-> List[Any]:
lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
lowerCamelCase : str = AlbertTokenizer(__a )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
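# Special-token layout assumed by the sequence-builder assertions above
# (AlbertTokenizer's standard scheme):
#
#     single sequence: [CLS] tokens [SEP]
#     sequence pair:   [CLS] tokens_a [SEP] tokens_b [SEP]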
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    # Small test network: Linear -> BatchNorm1d -> Linear
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Dict = ModelHook()
add_hook_to_module(__a , __a )
self.assertEqual(test_model._hf_hook , __a )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Union[str, Any] = ModelHook()
add_hook_to_module(__a , __a )
add_hook_to_module(__a , __a , append=__a )
self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: List[Any] )-> List[str]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Union[str, Any] = test_model(x + 1 )
lowerCamelCase : Optional[int] = test_model(x + 2 )
lowerCamelCase : List[Any] = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[int] = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : Dict = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
assert torch.allclose(__a , __a , atol=1e-5 )
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : List[str] = torch.randn(2 , 3 )
lowerCamelCase : int = test_model(__a )
lowerCamelCase : Dict = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : str = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
assert torch.allclose(__a , output + 2 , atol=1e-5 )
def a__ ( self: int )-> Dict:
lowerCamelCase : List[Any] = ModelForTest()
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : List[str] = test_model(__a )
lowerCamelCase : Any = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = test_model(__a )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCamelCase : str = torch.randn(2 , 3 )
lowerCamelCase : Dict = model(__a )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
lowerCamelCase : str = model(__a )
self.assertEqual(output.device , torch.device(0 ) )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
lowerCamelCase : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : int = torch.randn(2 , 3 )
lowerCamelCase : Optional[int] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__a , execution_device=__a , offload=__a )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Tuple = torch.randn(2 , 3 )
lowerCamelCase : Any = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
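# Standalone sketch of the hook mechanism exercised above, using accelerate's
# public hooks API (this mirrors the PreForwardHook defined in this file):
#
#     import torch
#     from torch import nn
#     from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module
#
#     class AddOneToInput(ModelHook):
#         def pre_forward(self, module, *args, **kwargs):
#             return (args[0] + 1,) + args[1:], kwargs
#
#     layer = nn.Linear(3, 4)
#     add_hook_to_module(layer, AddOneToInput())
#     out = layer(torch.randn(2, 3))  # forward effectively sees x + 1
#     remove_hook_from_module(layer)  # restores the original forward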
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__lowerCamelCase :Tuple = 'facebook/wmt19-en-de'
__lowerCamelCase :Any = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__lowerCamelCase :List[Any] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__lowerCamelCase :Union[str, Any] = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__lowerCamelCase :Optional[Any] = tokenizer(['Making tiny model'], return_tensors='pt')
__lowerCamelCase :List[Any] = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
__lowerCamelCase :List[str] = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
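# Once uploaded, the tiny checkpoint is consumed like any FSMT model
# (hub id taken from the comment above):
#
#     from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#     tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
#     model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")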
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase :Optional[Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
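# Effect of the _LazyModule pattern above (a sketch): submodule imports are
# deferred until first attribute access, so the config and feature-extractor
# classes stay importable without torch, while the model classes only trigger
# the torch-dependent import when actually touched:
#
#     from transformers.models.encodec import EncodecConfig   # light import
#     from transformers.models.encodec import EncodecModel    # resolves lazily, needs torch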
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def snake_case(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
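# Worked examples (a sketch; the entry function validates its inputs and then
# counts representations of needed_sum as a sum of `power`-th powers of
# distinct natural numbers, starting from base 1):
#
#     snake_case(13, 2)   # -> 1   (13 = 2**2 + 3**2)
#     snake_case(100, 2)  # -> 3   (10**2; 6**2 + 8**2; 1 + 9 + 16 + 25 + 49)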
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
lowerCamelCase : Dict = ConvNextModelTester(self )
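# Backbone usage sketch (ConvNextBackbone/ConvNextConfig are the classes
# imported above; shapes follow the backbone checks earlier in this file):
#
#     import torch
#     from transformers import ConvNextBackbone, ConvNextConfig
#     config = ConvNextConfig(out_features=["stage2", "stage3", "stage4"])
#     backbone = ConvNextBackbone(config)
#     feats = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#     # one feature map per requested stage; channels == config.hidden_sizes[1:]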
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple =['''image_processor''', '''tokenizer''']
snake_case__ : Optional[Any] ='''AutoImageProcessor'''
snake_case__ : List[str] ='''AutoTokenizer'''
def __init__( self: str , __a: str=None , __a: Optional[Any]=None , **__a: Union[str, Any] )-> Tuple:
lowerCamelCase : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __a , )
lowerCamelCase : str = kwargs.pop("""feature_extractor""" )
lowerCamelCase : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__a , __a )
lowerCamelCase : Any = self.image_processor
lowerCamelCase : Any = False
def __call__( self: Dict , *__a: Optional[Any] , **__a: int )-> Optional[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__a , **__a )
lowerCamelCase : List[Any] = kwargs.pop("""images""" , __a )
lowerCamelCase : List[str] = kwargs.pop("""text""" , __a )
if len(__a ) > 0:
lowerCamelCase : Any = args[0]
lowerCamelCase : List[Any] = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
lowerCamelCase : str = self.image_processor(__a , *__a , **__a )
if text is not None:
lowerCamelCase : Union[str, Any] = self.tokenizer(__a , **__a )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase : List[Any] = encodings["""input_ids"""]
return inputs
def a__ ( self: List[str] , *__a: Tuple , **__a: Optional[int] )-> Union[str, Any]:
return self.tokenizer.batch_decode(*__a , **__a )
def a__ ( self: Union[str, Any] , *__a: int , **__a: int )-> Union[str, Any]:
return self.tokenizer.decode(*__a , **__a )
@contextmanager
def a__ ( self: Dict )-> int:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
lowerCamelCase : List[str] = True
lowerCamelCase : int = self.tokenizer
yield
lowerCamelCase : str = self.image_processor
lowerCamelCase : Union[str, Any] = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
@property
def a__ ( self: List[str] )-> Union[str, Any]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __a , )
return self.image_processor_class
@property
def a__ ( self: Dict )-> Union[str, Any]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __a , )
return self.image_processor
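# token2json sketch (parsing Donut-style <s_key> ... </s_key> markup into
# nested Python structures, as implemented above; <sep/> separates siblings;
# `processor` is a hypothetical instance of the processor class):
#
#     processor.token2json("<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>")
#     # -> {"menu": {"name": "latte", "price": "4.50"}}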
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] ='''realm'''
def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
# Common config
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Dict = retriever_proj_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Tuple = num_candidates
lowerCamelCase : int = intermediate_size
lowerCamelCase : Dict = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : Optional[Any] = layer_norm_eps
# Reader config
lowerCamelCase : List[str] = span_hidden_size
lowerCamelCase : Dict = max_span_width
lowerCamelCase : Optional[Any] = reader_layer_norm_eps
lowerCamelCase : Optional[int] = reader_beam_size
lowerCamelCase : List[Any] = reader_seq_len
# Retrieval config
lowerCamelCase : int = num_block_records
lowerCamelCase : Dict = searcher_beam_size
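# Usage sketch (an assumption: the class above corresponds to transformers'
# RealmConfig; defaults mirror the google/realm-* checkpoints listed in the
# archive map):
#
#     from transformers import RealmConfig
#     config = RealmConfig()
#     config.retriever_proj_size  # 128
#     config.num_candidates       # 8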
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : torch.FloatTensor
class A__ ( nn.Module):
"""simple docstring"""
def __init__( self: Dict , __a: Optional[Any]=3 , __a: Any=3 , __a: List[str]=("DownEncoderBlock2D",) , __a: List[str]=(64,) , __a: Union[str, Any]=2 , __a: Union[str, Any]=32 , __a: Union[str, Any]="silu" , __a: List[str]=True , )-> Dict:
super().__init__()
lowerCamelCase : Dict = layers_per_block
lowerCamelCase : List[Any] = torch.nn.Conv2d(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowerCamelCase : str = None
lowerCamelCase : str = nn.ModuleList([] )
# down
lowerCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
lowerCamelCase : Optional[int] = output_channel
lowerCamelCase : Optional[Any] = block_out_channels[i]
lowerCamelCase : List[str] = i == len(__a ) - 1
lowerCamelCase : str = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
lowerCamelCase : int = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
lowerCamelCase : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
lowerCamelCase : int = nn.SiLU()
lowerCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
lowerCamelCase : List[str] = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
lowerCamelCase : str = False
def a__ ( self: List[Any] , __a: Dict )-> Union[str, Any]:
lowerCamelCase : Tuple = x
lowerCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a: Tuple ):
def custom_forward(*__a: int ):
return module(*__a )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
lowerCamelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
lowerCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
lowerCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
lowerCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
lowerCamelCase : Tuple = down_block(__a )
# middle
lowerCamelCase : Optional[int] = self.mid_block(__a )
# post-process
lowerCamelCase : Tuple = self.conv_norm_out(__a )
lowerCamelCase : Optional[Any] = self.conv_act(__a )
lowerCamelCase : List[Any] = self.conv_out(__a )
return sample
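

# Minimal runnable sketch (added for illustration, not part of the original
# file): the closure-plus-torch.utils.checkpoint pattern used in the forward
# pass above, applied to a toy layer. Activations are recomputed during the
# backward pass instead of being stored.
def _checkpoint_demo():
    layer = nn.Linear(4, 4)
    x = torch.randn(2, 4, requires_grad=True)

    def create_custom_forward(module):
        def custom_forward(*inputs):
            return module(*inputs)

        return custom_forward

    out = torch.utils.checkpoint.checkpoint(create_custom_forward(layer), x)
    out.sum().backward()
    return x.grad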
class A__ ( nn.Module):
"""simple docstring"""
def __init__( self: Union[str, Any] , __a: Dict=3 , __a: Dict=3 , __a: Union[str, Any]=("UpDecoderBlock2D",) , __a: List[Any]=(64,) , __a: List[Any]=2 , __a: List[str]=32 , __a: Dict="silu" , __a: int="group" , )-> Union[str, Any]:
super().__init__()
lowerCamelCase : Dict = layers_per_block
lowerCamelCase : List[str] = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowerCamelCase : Optional[Any] = None
lowerCamelCase : Optional[Any] = nn.ModuleList([] )
lowerCamelCase : Any = in_channels if norm_type == """spatial""" else None
# mid
lowerCamelCase : List[str] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
lowerCamelCase : str = list(reversed(__a ) )
lowerCamelCase : List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
lowerCamelCase : Optional[Any] = output_channel
lowerCamelCase : str = reversed_block_out_channels[i]
lowerCamelCase : int = i == len(__a ) - 1
lowerCamelCase : str = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
lowerCamelCase : Tuple = output_channel
# out
if norm_type == "spatial":
lowerCamelCase : int = SpatialNorm(block_out_channels[0] , __a )
else:
lowerCamelCase : str = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
lowerCamelCase : int = nn.SiLU()
lowerCamelCase : Optional[int] = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
lowerCamelCase : Union[str, Any] = False
def a__ ( self: List[Any] , __a: List[Any] , __a: List[str]=None )-> List[Any]:
lowerCamelCase : List[str] = z
lowerCamelCase : str = self.conv_in(__a )
lowerCamelCase : str = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a: str ):
def custom_forward(*__a: List[str] ):
return module(*__a )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
lowerCamelCase : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
lowerCamelCase : Dict = sample.to(__a )
# up
for up_block in self.up_blocks:
lowerCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
lowerCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
lowerCamelCase : Any = sample.to(__a )
# up
for up_block in self.up_blocks:
lowerCamelCase : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
lowerCamelCase : Tuple = self.mid_block(__a , __a )
lowerCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
lowerCamelCase : str = up_block(__a , __a )
# post-process
if latent_embeds is None:
lowerCamelCase : Union[str, Any] = self.conv_norm_out(__a )
else:
lowerCamelCase : Optional[Any] = self.conv_norm_out(__a , __a )
lowerCamelCase : Optional[int] = self.conv_act(__a )
lowerCamelCase : List[Any] = self.conv_out(__a )
return sample
class A__ ( nn.Module):
"""simple docstring"""
def __init__( self: Tuple , __a: str , __a: Any , __a: str , __a: List[str]=None , __a: Optional[Any]="random" , __a: List[str]=False , __a: Optional[Any]=True )-> Optional[int]:
super().__init__()
lowerCamelCase : str = n_e
lowerCamelCase : str = vq_embed_dim
lowerCamelCase : int = beta
lowerCamelCase : Union[str, Any] = legacy
lowerCamelCase : List[Any] = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowerCamelCase : int = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
lowerCamelCase : str = self.used.shape[0]
lowerCamelCase : Dict = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowerCamelCase : Tuple = self.re_embed
lowerCamelCase : Optional[int] = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
lowerCamelCase : Tuple = n_e
lowerCamelCase : List[Any] = sane_index_shape
def a__ ( self: int , __a: List[Any] )-> Tuple:
lowerCamelCase : str = inds.shape
assert len(__a ) > 1
lowerCamelCase : Optional[Any] = inds.reshape(ishape[0] , -1 )
lowerCamelCase : str = self.used.to(__a )
lowerCamelCase : Optional[Any] = (inds[:, :, None] == used[None, None, ...]).long()
lowerCamelCase : Optional[Any] = match.argmax(-1 )
lowerCamelCase : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
lowerCamelCase : Dict = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowerCamelCase : Optional[int] = self.unknown_index
return new.reshape(__a )
def a__ ( self: Union[str, Any] , __a: Optional[int] )-> Optional[Any]:
lowerCamelCase : List[Any] = inds.shape
assert len(__a ) > 1
lowerCamelCase : Any = inds.reshape(ishape[0] , -1 )
lowerCamelCase : Optional[Any] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
lowerCamelCase : int = 0 # simply set to zero
lowerCamelCase : Dict = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def a__ ( self: Any , __a: Optional[int] )-> Dict:
# reshape z -> (batch, height, width, channel) and flatten
lowerCamelCase : Dict = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowerCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowerCamelCase : Union[str, Any] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
lowerCamelCase : Any = self.embedding(__a ).view(z.shape )
lowerCamelCase : Union[str, Any] = None
lowerCamelCase : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
lowerCamelCase : Optional[int] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowerCamelCase : Tuple = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowerCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
lowerCamelCase : List[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowerCamelCase : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowerCamelCase : int = self.remap_to_used(__a )
lowerCamelCase : Optional[int] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowerCamelCase : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def a__ ( self: Tuple , __a: List[str] , __a: List[str] )-> Union[str, Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowerCamelCase : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
lowerCamelCase : str = self.unmap_to_all(__a )
lowerCamelCase : List[str] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowerCamelCase : Dict = self.embedding(__a )
if shape is not None:
lowerCamelCase : List[str] = z_q.view(__a )
# reshape back to match original input shape
lowerCamelCase : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
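

# Self-contained sketch (added for illustration): the core of the quantizer
# above is a nearest-neighbour lookup into the codebook, plus a
# straight-through estimator so encoder gradients bypass the argmin.
def _nearest_codebook_entry(z_flat: torch.Tensor, codebook: torch.Tensor):
    # z_flat: (n, d) latents, codebook: (n_e, d) embedding table
    indices = torch.argmin(torch.cdist(z_flat, codebook), dim=1)
    z_q = codebook[indices]
    z_q = z_flat + (z_q - z_flat).detach()  # identity gradient w.r.t. z_flat
    return z_q, indices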
class A__ ( __lowercase):
"""simple docstring"""
def __init__( self: Dict , __a: Optional[Any] , __a: List[Any]=False )-> str:
lowerCamelCase : Optional[Any] = parameters
lowerCamelCase , lowerCamelCase : List[Any] = torch.chunk(__a , 2 , dim=1 )
lowerCamelCase : Optional[int] = torch.clamp(self.logvar , -30.0 , 20.0 )
lowerCamelCase : Dict = deterministic
lowerCamelCase : Tuple = torch.exp(0.5 * self.logvar )
lowerCamelCase : Union[str, Any] = torch.exp(self.logvar )
if self.deterministic:
lowerCamelCase : Dict = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def a__ ( self: Any , __a: Optional[torch.Generator] = None )-> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
lowerCamelCase : Optional[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
lowerCamelCase : Optional[Any] = self.mean + self.std * sample
return x
def a__ ( self: Optional[Any] , __a: Tuple=None )-> Optional[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def a__ ( self: Optional[Any] , __a: Any , __a: List[Any]=[1, 2, 3] )-> str:
if self.deterministic:
return torch.Tensor([0.0] )
lowerCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def a__ ( self: List[Any] )-> Dict:
return self.mean
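

# Self-contained sketch (added for illustration, assuming the usual VAE
# parameterization of mean and logvar stacked along dim=1): reparameterized
# sampling and the closed-form KL against a standard normal, mirroring what
# the distribution class above computes.
def _diag_gaussian_sample_and_kl(parameters: torch.Tensor):
    mean, logvar = torch.chunk(parameters, 2, dim=1)
    logvar = torch.clamp(logvar, -30.0, 20.0)
    z = mean + torch.exp(0.5 * logvar) * torch.randn_like(mean)  # reparameterization trick
    kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
    return z, kl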
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    """Configuration for GLPN (monocular depth estimation with a SegFormer-style encoder)."""

    model_type = "glpn"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
                 hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
                 num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu",
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
                 drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10,
                 head_in_index=-1, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
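

if __name__ == "__main__":
    # Illustrative sketch (not part of the original file): depth predictions
    # are capped at `max_depth` and decoded from `decoder_hidden_size` channels.
    config = GLPNConfig(decoder_hidden_size=32, max_depth=8)
    print(config.hidden_sizes, config.max_depth)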
| 42
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
        'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XGLMForCausalLM',
        'XGLMModel',
        'XGLMPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
        'FlaxXGLMForCausalLM',
        'FlaxXGLMModel',
        'FlaxXGLMPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
        'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXGLMForCausalLM',
        'TFXGLMModel',
        'TFXGLMPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
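

# Illustrative note (added): the _LazyModule registration above defers each
# submodule import until its attribute is first accessed, e.g.
#   from transformers import XGLMConfig  # only configuration_xglm is loaded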
| 42
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    # Returns u * (u - 1) * ... * (u - p + 1), the numerator of the p-th
    # Newton forward-difference term.
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # calculate the forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
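

# Worked check (added for illustration): for f(x) = x**2 sampled at x = 0..3
# the forward-difference polynomial is exact, so interpolating at 1.5 gives 2.25.
def _demo() -> None:
    x = [0, 1, 2, 3]
    n = len(x)
    y = [[0.0] * n for _ in range(n)]
    for i, fx in enumerate([0.0, 1.0, 4.0, 9.0]):
        y[i][0] = fx
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    u = (1.5 - x[0]) / (x[1] - x[0])
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    assert abs(summ - 2.25) < 1e-9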
if __name__ == "__main__":
main()
| 42
| 1
|
"""simple docstring"""
def longest_common_substring(text1: str, text2: str) -> str:
    # Classic O(len(text1) * len(text2)) dynamic programme: dp[i][j] is the
    # length of the common suffix of text1[:i] and text2[:j].
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
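

# Quick example (added for illustration): "bcd" is the longest contiguous run
# shared by the two inputs.
assert longest_common_substring("abcdef", "xbcdy") == "bcd"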
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    # Exact GELU: x * Phi(x), computed with the Gauss error function.
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    # Tanh approximation of GELU (the GPT-2 style "gelu_new").
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    # GELU clipped to [-10, 10], which helps quantization.
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    # Gated Linear Unit: split into two halves, gate one with the sigmoid of the other.
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse('2.4'):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    'gelu': gelu,
    'gelu_10': gelu_10,
    'gelu_fast': gelu_fast,
    'gelu_new': gelu_new,
    'glu': glu,
    'mish': mish,
    'quick_gelu': quick_gelu,
    'relu': tf.keras.activations.relu,
    'sigmoid': tf.keras.activations.sigmoid,
    'silu': tf.keras.activations.swish,
    'swish': tf.keras.activations.swish,
    'tanh': tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}')
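

if __name__ == "__main__":
    # Usage sketch (added for illustration): resolve an activation by name and
    # apply it; unknown names raise KeyError.
    act = get_tf_activation("gelu_fast")
    print(act(tf.constant([-1.0, 0.0, 1.0])).numpy())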
| 42
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict:
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ )
else:
lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 192:
lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 256:
lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 384:
lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ )
from_model.eval()
lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
lowerCamelCase : Tuple = OrderedDict()
lowerCamelCase : Optional[Any] = from_model.state_dict()
lowerCamelCase : str = list(from_model.state_dict().keys() )
lowerCamelCase : List[Any] = list(our_model.state_dict().keys() )
print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : str = weights[og_keys[i]]
our_model.load_state_dict(UpperCamelCase__ )
lowerCamelCase : int = torch.randn((2, 3, 224, 224) )
lowerCamelCase : Any = from_model(UpperCamelCase__ )
lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one."
lowerCamelCase : Dict = name
print(UpperCamelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowerCamelCase : Optional[int] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]:
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : List[Any] = 1000
lowerCamelCase : Dict = (1, num_labels)
lowerCamelCase : List[Any] = """huggingface/label-files"""
lowerCamelCase : Optional[int] = num_labels
lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase : Any = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase : List[Any] = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
lowerCamelCase : Optional[int] = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
lowerCamelCase : List[Any] = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
        help='The name of the model you wish to convert; it must be one of the supported LeViT* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
__lowerCamelCase :List[Any] = parser.parse_args()
__lowerCamelCase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
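

# Example invocation (added for illustration; the script filename is
# hypothetical, the flags come from the parser above):
#   python convert_levit_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub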
| 42
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] ='''time_series_transformer'''
snake_case__ : List[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
# time series specific configuration
lowerCamelCase : str = prediction_length
lowerCamelCase : Optional[Any] = context_length or prediction_length
lowerCamelCase : Tuple = distribution_output
lowerCamelCase : Any = loss
lowerCamelCase : List[Any] = input_size
lowerCamelCase : int = num_time_features
lowerCamelCase : Dict = lags_sequence
lowerCamelCase : Optional[int] = scaling
lowerCamelCase : int = num_dynamic_real_features
lowerCamelCase : Tuple = num_static_real_features
lowerCamelCase : Any = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : int = cardinality
else:
lowerCamelCase : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = embedding_dimension
else:
lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features
lowerCamelCase : List[str] = d_model
lowerCamelCase : Tuple = encoder_attention_heads
lowerCamelCase : Optional[int] = decoder_attention_heads
lowerCamelCase : Union[str, Any] = encoder_ffn_dim
lowerCamelCase : str = decoder_ffn_dim
lowerCamelCase : str = encoder_layers
lowerCamelCase : Any = decoder_layers
lowerCamelCase : Optional[int] = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Tuple = activation_dropout
lowerCamelCase : Optional[int] = encoder_layerdrop
lowerCamelCase : int = decoder_layerdrop
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : Optional[Any] = init_std
lowerCamelCase : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=__a , **__a )
@property
def a__ ( self: int )-> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
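

# Illustrative sketch (added, not part of the original file; assumes the class
# is exported as `TimeSeriesTransformerConfig`, as in `transformers`): the
# encoder input width is derived rather than passed in --
#   feature_size = input_size * len(lags_sequence) + _number_of_features
# config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
# print(config.feature_size, config.d_model)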
| 42
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple =(KDPMaDiscreteScheduler,)
snake_case__ : Tuple =10
def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]:
lowerCamelCase : int = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**__a )
return config
def a__ ( self: Union[str, Any] )-> Any:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__a )
def a__ ( self: str )-> int:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def a__ ( self: int )-> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def a__ ( self: List[Any] )-> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase : List[str] = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase : List[str] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : Dict = self.dummy_model()
lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : List[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : Optional[Any] = output.prev_sample
lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def a__ ( self: Any )-> Any:
if torch_device == "mps":
return
lowerCamelCase : Dict = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config()
lowerCamelCase : int = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : List[Any] = self.dummy_model()
lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : Optional[int] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[Any] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : str = output.prev_sample
lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def a__ ( self: Optional[Any] )-> List[Any]:
if torch_device == "mps":
return
lowerCamelCase : Any = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase : Optional[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
lowerCamelCase : Union[str, Any] = self.dummy_model()
lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : int = scheduler.step(__a , __a , __a )
lowerCamelCase : int = output.prev_sample
lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
lowerCamelCase : int = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
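

# Minimal denoising-loop sketch (added for illustration), mirroring the tests
# above with the upstream scheduler name `KDPM2DiscreteScheduler`:
#   scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1_100, beta_schedule="linear")
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample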
| 42
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
snake_case__ : Any =False
snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ ( cls: Dict )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Union[str, Any] )-> Any:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: Tuple )-> Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCamelCase : Optional[int] = CLIPTextModel(__a )
lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Tuple = torch.manual_seed(__a )
else:
lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Dict = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def a__ ( self: Dict )-> str:
lowerCamelCase : Tuple = """cpu"""
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : List[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Any = self.get_dummy_inputs(__a )
lowerCamelCase : Union[str, Any] = pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase : Optional[Any] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def a__ ( self: int )-> Optional[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def a__ ( self: Union[str, Any] )-> Optional[int]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self: Tuple )-> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def a__ ( self: Dict )-> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def a__ ( self: Optional[int] )-> Dict:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def a__ ( self: Any )-> Tuple:
super().test_save_load_local(expected_max_difference=5e-4 )
def a__ ( self: str )-> str:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
@classmethod
def a__ ( cls: Any )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Dict )-> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: int )-> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = torch.manual_seed(51 )
lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowerCamelCase : Dict = """a painting of an elephant with glasses"""
lowerCamelCase : Any = [5, 7]
lowerCamelCase : Tuple = pipe(
prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowerCamelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 42
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =StableDiffusionXLImgaImgPipeline
snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ ( self: List[str] )-> int:
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase : Any = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
lowerCamelCase : Dict = CLIPTextModel(__a )
lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase : Any = image / 2 + 0.5
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Dict = torch.manual_seed(__a )
else:
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ ( self: Optional[Any] )-> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self: List[str] )-> Optional[Any]:
pass
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : str = sd_pipe.to(__a )
lowerCamelCase : Any = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]:
lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_inputs(__a )
lowerCamelCase : Optional[Any] = pipe(**__a ).images
lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 42
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        # mask out every position that holds the pad token
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class A__ :
"""simple docstring"""
snake_case__ : Optional[Any] =OPTConfig
snake_case__ : int ={}
snake_case__ : Dict ='''gelu'''
def __init__( self: List[str] , __a: Optional[Any] , __a: List[str]=13 , __a: Optional[Any]=7 , __a: List[str]=True , __a: Tuple=False , __a: Tuple=99 , __a: Optional[Any]=16 , __a: List[str]=2 , __a: Dict=4 , __a: Any=4 , __a: int="gelu" , __a: Optional[int]=0.1 , __a: List[str]=0.1 , __a: List[Any]=20 , __a: Optional[Any]=2 , __a: str=1 , __a: Optional[Any]=0 , __a: Optional[Any]=16 , __a: Optional[Any]=16 , )-> Tuple:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : List[str] = batch_size
lowerCamelCase : int = seq_length
lowerCamelCase : List[Any] = is_training
lowerCamelCase : int = use_labels
lowerCamelCase : Tuple = vocab_size
lowerCamelCase : Any = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Dict = intermediate_size
lowerCamelCase : str = hidden_act
lowerCamelCase : int = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : Optional[Any] = max_position_embeddings
lowerCamelCase : Tuple = eos_token_id
lowerCamelCase : List[str] = pad_token_id
lowerCamelCase : List[str] = bos_token_id
lowerCamelCase : List[Any] = embed_dim
lowerCamelCase : Union[str, Any] = word_embed_proj_dim
lowerCamelCase : str = False
def a__ ( self: Optional[Any] )-> List[str]:
lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__a , **self.config_updates , )
lowerCamelCase : int = prepare_opt_inputs_dict(__a , __a )
return config, inputs_dict
def a__ ( self: Optional[Any] , __a: Tuple , __a: Tuple )-> List[str]:
lowerCamelCase : str = TFOPTModel(config=__a )
lowerCamelCase : Any = inputs_dict["""input_ids"""]
lowerCamelCase : Any = input_ids[:1, :]
lowerCamelCase : str = inputs_dict["""attention_mask"""][:1, :]
lowerCamelCase : Dict = 1
# first forward pass
lowerCamelCase : List[str] = model(__a , attention_mask=__a , use_cache=__a )
lowerCamelCase , lowerCamelCase : Tuple = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCamelCase : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCamelCase : Any = model(__a , attention_mask=__a )[0]
lowerCamelCase : Optional[Any] = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCamelCase : List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCamelCase : Any = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
@require_tf
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
snake_case__ : Union[str, Any] =(TFOPTForCausalLM,) if is_tf_available() else ()
snake_case__ : List[str] =(
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
snake_case__ : Dict =False
snake_case__ : List[str] =False
snake_case__ : List[Any] =False
snake_case__ : Tuple =10
def a__ ( self: Dict )-> List[str]:
lowerCamelCase : Tuple = TFOPTModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a )
def a__ ( self: Optional[int] )-> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self: Any )-> Dict:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def a__ ( self: Dict )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__a: List[Any] , __a: Tuple ):
if hasattr(__a , """weight""" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(__a , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
lowerCamelCase : List[str] = model_class(config=__a )
lowerCamelCase : Optional[Any] = _get_word_embedding_weight(__a , model.get_input_embeddings() )
lowerCamelCase : int = _get_word_embedding_weight(__a , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__a )
lowerCamelCase : List[Any] = _get_word_embedding_weight(__a , model.get_input_embeddings() )
lowerCamelCase : Optional[int] = _get_word_embedding_weight(__a , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCamelCase : Tuple = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __a )
# check that weights remain the same after resizing
lowerCamelCase : List[Any] = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCamelCase : Dict = False
self.assertTrue(__a )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __a )
lowerCamelCase : Any = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCamelCase : Optional[int] = False
self.assertTrue(__a )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Tuple =99
def a__ ( self: Optional[int] )-> Optional[Any]:
lowerCamelCase : List[Any] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowerCamelCase : Any = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCamelCase : Union[str, Any] = input_ids.shape[0]
lowerCamelCase : List[str] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
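# ids_tensor used above is a test helper that draws a random batch of token ids.
# A minimal stand-in sketch (random_ids is a hypothetical name, not the helper itself):
import tensorflow as tf

def random_ids(shape, vocab_size):
    # Uniform integers in [0, vocab_size), matching what the config builder expects.
    return tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)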
@require_sentencepiece
@require_tf
class A__ ( unittest.TestCase):
"""simple docstring"""
@slow
def a__ ( self: List[str] )-> int:
lowerCamelCase : int = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
lowerCamelCase : Tuple = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
lowerCamelCase : str = tf.not_equal(__a , model.config.pad_token_id )
with tf.GradientTape():
lowerCamelCase : Tuple = model(input_ids=__a , attention_mask=__a ).last_hidden_state
lowerCamelCase : List[Any] = (1, 11, 512)
self.assertEqual(output.shape , __a )
lowerCamelCase : Union[str, Any] = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __a , atol=4e-3 ) )
lowerCamelCase : List[Any] = tf.function(__a , jit_compile=__a )
lowerCamelCase : Optional[Any] = xla_generate(__a , __a )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __a , atol=4e-2 ) )
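# The slow test above checks that eager and XLA-compiled forward passes agree
# (with a looser tolerance for XLA). Minimal sketch of the compilation step alone:
import tensorflow as tf

@tf.function(jit_compile=True)
def _xla_scaled_sum(x):
    return tf.reduce_sum(x * 2.0)

_ = _xla_scaled_sum(tf.ones((2, 3)))  # traced and compiled with XLA on first call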
@require_tf
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: str )-> str:
super().setUp()
lowerCamelCase : List[str] = """facebook/opt-350m"""
def a__ ( self: Optional[int] )-> Tuple:
lowerCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(self.path_model )
lowerCamelCase : Dict = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCamelCase : Any = tokenizer(__a , return_tensors="""tf""" , padding=__a , add_special_tokens=__a )
lowerCamelCase : int = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCamelCase : Union[str, Any] = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(__a , __a , atol=1e-4 ) )
lowerCamelCase : Dict = tf.function(__a , jit_compile=__a )
lowerCamelCase : str = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__a , __a , atol=1e-4 ) )
@require_tf
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
@property
def a__ ( self: str )-> int:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def a__ ( self: Union[str, Any] )-> Union[str, Any]:
lowerCamelCase : int = """facebook/opt-125m"""
lowerCamelCase : Optional[int] = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCamelCase : List[str] = []
lowerCamelCase : Union[str, Any] = GPTaTokenizer.from_pretrained(__a )
lowerCamelCase : str = TFOPTForCausalLM.from_pretrained(__a )
for prompt in self.prompts:
lowerCamelCase : int = tokenizer(__a , return_tensors="""tf""" ).input_ids
lowerCamelCase : Any = model.generate(__a , max_length=10 )
lowerCamelCase : Tuple = tokenizer.batch_decode(__a , skip_special_tokens=__a )
predicted_outputs += generated_string
self.assertListEqual(__a , __a )
def a__ ( self: List[Any] )-> List[Any]:
lowerCamelCase : Optional[int] = """facebook/opt-350m"""
lowerCamelCase : str = GPTaTokenizer.from_pretrained(__a )
lowerCamelCase : Tuple = TFOPTForCausalLM.from_pretrained(__a )
lowerCamelCase : int = """left"""
# use different length sentences to test batching
lowerCamelCase : Any = [
"""Hello, my dog is a little""",
"""Today, I""",
]
lowerCamelCase : Union[str, Any] = tokenizer(__a , return_tensors="""tf""" , padding=__a )
lowerCamelCase : Dict = inputs["""input_ids"""]
lowerCamelCase : List[str] = model.generate(input_ids=__a , attention_mask=inputs["""attention_mask"""] )
lowerCamelCase : Tuple = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
lowerCamelCase : Dict = model.generate(input_ids=__a )
lowerCamelCase : List[str] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.int32 ) )
lowerCamelCase : str = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
lowerCamelCase : Optional[int] = model.generate(input_ids=__a , max_length=model.config.max_length - num_paddings )
lowerCamelCase : str = tokenizer.batch_decode(__a , skip_special_tokens=__a )
lowerCamelCase : Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a )
lowerCamelCase : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=__a )
lowerCamelCase : List[str] = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Optional[int] = """facebook/opt-350m"""
lowerCamelCase : Dict = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCamelCase : Tuple = []
lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(__a )
lowerCamelCase : List[Any] = TFOPTForCausalLM.from_pretrained(__a )
for prompt in self.prompts:
lowerCamelCase : int = tokenizer(__a , return_tensors="""tf""" ).input_ids
lowerCamelCase : Dict = model.generate(__a , max_length=10 )
lowerCamelCase : Any = tokenizer.batch_decode(__a , skip_special_tokens=__a )
predicted_outputs += generated_string
self.assertListEqual(__a , __a )
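# The batch-generation test above depends on left padding: for decoder-only models
# like OPT, right padding would insert pad tokens between the prompt and the
# continuation. Hedged usage sketch with the same checkpoint family as the tests:
from transformers import GPT2Tokenizer, TFOPTForCausalLM

_tok = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
_tok.padding_side = "left"
_model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
_batch = _tok(["Hello, my dog is a little", "Today, I"], return_tensors="tf", padding=True)
_out = _model.generate(input_ids=_batch.input_ids, attention_mask=_batch.attention_mask)
print(_tok.batch_decode(_out, skip_special_tokens=True))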
| 42
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Tuple , __a: Optional[int] )-> List[str]:
return None
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Tuple , __a: str , __a: str , __a: str )-> Tuple:
return None
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =[
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self: Optional[Any] )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """tf""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: str )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """pt""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: Union[str, Any] )-> Dict:
from transformers import BertModel
lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__a ) )
vocab_file.flush()
lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
model.save_pretrained(__a )
self._test_export(__a , """pt""" , 12 , __a )
@require_tf
@slow
def a__ ( self: Optional[Any] )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
lowerCamelCase : Tuple = quantize(Path(__a ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def a__ ( self: Any )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
lowerCamelCase : Dict = quantize(__a )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any:
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" )
# Remove the folder if it already exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__a , __a , __a , __a , __a , **__a )
return path
except Exception as e:
self.fail(__a )
@require_torch
@require_tokenizers
@slow
def a__ ( self: Tuple )-> Dict:
from transformers import BertModel
lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """pt""" )
@require_tf
@require_tokenizers
@slow
def a__ ( self: Optional[Any] )-> List[Any]:
from transformers import TFBertModel
lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """tf""" )
def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]:
lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a )
lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a )
# Assert all variables are present
self.assertEqual(len(__a ) , len(__a ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __a )
self.assertSequenceEqual(variable_names[3:] , __a )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def a__ ( self: List[Any] )-> int:
lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__a ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__a ) , set(__a ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )
# Should keep exactly one arg (everything before the missing "some_other_args" parameter)
self.assertEqual(len(__a ) , 1 )
self.assertEqual(len(__a ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 42
| 1
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class A__ ( _lowerCamelCase):
"""simple docstring"""
def __init__( self: int , __a: List[str] , __a: Optional[Any] = None , __a: Any = None , __a: Tuple = True , __a: int = None , __a: Any = False , __a: int = None , __a: Dict = True , __a: Optional[Any] = "arrow" , **__a: Union[str, Any] , )-> Optional[int]:
super().__init__(
split=A__ , features=A__ , cache_dir=A__ , keep_in_memory=A__ , streaming=A__ , **A__ , )
lowerCamelCase : Dict = load_from_cache_file
lowerCamelCase : Union[str, Any] = file_format
lowerCamelCase : Tuple = Spark(
df=A__ , features=A__ , cache_dir=A__ , working_dir=A__ , **A__ , )
def a__ ( self: Any )-> Optional[int]:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowerCamelCase : Union[str, Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=A__ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
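# The reader above backs the Spark integration in datasets. Hedged usage sketch
# (assumes a local SparkSession; Dataset.from_spark is the public entry point):
from pyspark.sql import SparkSession
from datasets import Dataset

_spark = SparkSession.builder.master("local[*]").getOrCreate()
_df = _spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
_ds = Dataset.from_spark(_df)  # materializes the DataFrame as an Arrow-backed Dataset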
| 700
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Optional[int] )-> Union[str, Any]:
lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60]
lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
lowerCamelCase : Union[str, Any] = 100
self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 )
def a__ ( self: str )-> str:
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def a__ ( self: str )-> List[Any]:
self.assertRaisesRegex(__a , """Weight can not be negative.""" )
def a__ ( self: Any )-> Dict:
self.assertRaisesRegex(__a , """Profit can not be negative.""" )
def a__ ( self: Optional[Any] )-> List[Any]:
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def a__ ( self: Optional[Any] )-> Tuple:
self.assertRaisesRegex(
__a , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
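# calc_profit above exercises a greedy knapsack heuristic: take items in order of
# profit-per-weight. A self-contained sketch of that idea (greedy_profit is a
# hypothetical stand-in, not the library function under test):
def greedy_profit(profit, weight, max_weight):
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if capacity <= 0:
            break
        take = min(w, capacity)  # take the whole item, or the fraction that still fits
        total += p * (take / w)
        capacity -= take
    return total

assert greedy_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210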
| 42
| 0
|