| code (string, lengths 82-53.2k) | code_codestyle (int64, 0-721) | style_context (string, lengths 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :
'''simple docstring'''
def __init__(self ,__lowerCamelCase ,__lowerCamelCase=13 ,__lowerCamelCase=32 ,__lowerCamelCase=3 ,__lowerCamelCase=4 ,__lowerCamelCase=[10, 20, 30, 40] ,__lowerCamelCase=[2, 2, 3, 2] ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=37 ,__lowerCamelCase="gelu" ,__lowerCamelCase=10 ,__lowerCamelCase=0.02 ,__lowerCamelCase=["stage2", "stage3", "stage4"] ,__lowerCamelCase=[2, 3, 4] ,__lowerCamelCase=None ,) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = parent
lowerCAmelCase__ : Dict = batch_size
lowerCAmelCase__ : List[str] = image_size
lowerCAmelCase__ : Optional[Any] = num_channels
lowerCAmelCase__ : List[str] = num_stages
lowerCAmelCase__ : int = hidden_sizes
lowerCAmelCase__ : str = depths
lowerCAmelCase__ : str = is_training
lowerCAmelCase__ : Tuple = use_labels
lowerCAmelCase__ : List[Any] = intermediate_size
lowerCAmelCase__ : List[str] = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_labels
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : List[Any] = out_features
lowerCAmelCase__ : List[str] = out_indices
lowerCAmelCase__ : Optional[int] = scope
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : int = None
if self.use_labels:
lowerCAmelCase__ : str = ids_tensor([self.batch_size] ,self.num_labels )
lowerCAmelCase__ : Any = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=__lowerCamelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : str = ConvNextModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCAmelCase__ : int = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : int = ConvNextForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCAmelCase__ : Any = model(__lowerCamelCase ,labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = ConvNextBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : List[Any] = ConvNextBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCAmelCase__ : Optional[int] = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = config_and_inputs
lowerCAmelCase__ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case_ =(
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case_ =True
snake_case_ =False
snake_case_ =False
snake_case_ =False
snake_case_ =False
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : str = ConvNextModelTester(self )
lowerCAmelCase__ : Optional[int] = ConfigTester(self ,config_class=__lowerCamelCase ,has_text_modality=__lowerCamelCase ,hidden_size=37 )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[int] = model_class(__lowerCamelCase )
lowerCAmelCase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : int = [*signature.parameters.keys()]
lowerCAmelCase__ : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ):
lowerCAmelCase__ : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(__lowerCamelCase ,__lowerCamelCase ) )
lowerCAmelCase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase__ : List[str] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) ,expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[Any] = True
check_hidden_states_output(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Tuple = True
check_hidden_states_output(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Union[str, Any] = ConvNextModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def lowerCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(__lowerCamelCase )
lowerCAmelCase__ : List[str] = self.default_image_processor
lowerCAmelCase__ : int = prepare_img()
lowerCAmelCase__ : Optional[Any] = image_processor(images=__lowerCamelCase ,return_tensors='''pt''' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Dict = model(**__lowerCamelCase )
# verify the logits
lowerCAmelCase__ : int = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCamelCase ,atol=1e-4 ) )
@require_torch
class lowerCamelCase__ ( unittest.TestCase , lowerCamelCase__):
'''simple docstring'''
snake_case_ =(ConvNextBackbone,) if is_torch_available() else ()
snake_case_ =ConvNextConfig
snake_case_ =False
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = ConvNextModelTester(self )
| 647 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : List[str]=False):
'''simple docstring'''
lowerCAmelCase__ : int = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight"""))
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias"""))
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight"""))
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias"""))
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight"""))
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias"""))
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight"""))
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias"""))
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight"""))
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias"""))
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
return rename_keys
def lowerCAmelCase__ ( lowerCamelCase_ : Dict ,lowerCamelCase_ : List[Any] ,lowerCamelCase_ : List[Any]=False):
'''simple docstring'''
for i in range(config.num_hidden_layers):
if base_model:
lowerCAmelCase__ : List[str] = ''''''
else:
lowerCAmelCase__ : Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ : List[str] = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""")
lowerCAmelCase__ : List[Any] = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""")
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ : Any = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ : Dict = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : Dict = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase__ ( lowerCamelCase_ : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ ,lowerCamelCase_)
def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ : Any = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ ,lowerCamelCase_)
def lowerCAmelCase__ ( lowerCamelCase_ : Dict ,lowerCamelCase_ : int ,lowerCamelCase_ : str):
'''simple docstring'''
lowerCAmelCase__ : str = dct.pop(lowerCamelCase_)
lowerCAmelCase__ : Union[str, Any] = val
def lowerCAmelCase__ ( lowerCamelCase_ : Optional[Any] ,lowerCamelCase_ : List[Any]):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = ViTMSNConfig()
lowerCAmelCase__ : int = 1000
lowerCAmelCase__ : List[Any] = '''datasets/huggingface/label-files'''
lowerCAmelCase__ : Dict = '''imagenet-1k-id2label.json'''
lowerCAmelCase__ : Any = json.load(open(hf_hub_download(lowerCamelCase_ ,lowerCamelCase_) ,'''r'''))
lowerCAmelCase__ : Any = {int(lowerCamelCase_): v for k, v in idalabel.items()}
lowerCAmelCase__ : List[Any] = idalabel
lowerCAmelCase__ : Optional[int] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase__ : str = 384
lowerCAmelCase__ : Any = 1536
lowerCAmelCase__ : List[str] = 6
elif "l16" in checkpoint_url:
lowerCAmelCase__ : Dict = 1024
lowerCAmelCase__ : int = 4096
lowerCAmelCase__ : Dict = 24
lowerCAmelCase__ : List[str] = 16
lowerCAmelCase__ : List[str] = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase__ : List[Any] = 4
elif "l7" in checkpoint_url:
lowerCAmelCase__ : Any = 7
lowerCAmelCase__ : Optional[int] = 1024
lowerCAmelCase__ : Optional[int] = 4096
lowerCAmelCase__ : Dict = 24
lowerCAmelCase__ : Optional[Any] = 16
lowerCAmelCase__ : Optional[Any] = 0.1
lowerCAmelCase__ : List[str] = ViTMSNModel(lowerCamelCase_)
lowerCAmelCase__ : int = torch.hub.load_state_dict_from_url(lowerCamelCase_ ,map_location='''cpu''')['''target_encoder''']
lowerCAmelCase__ : List[str] = ViTImageProcessor(size=config.image_size)
remove_projection_head(lowerCamelCase_)
lowerCAmelCase__ : Optional[int] = create_rename_keys(lowerCamelCase_ ,base_model=lowerCamelCase_)
for src, dest in rename_keys:
rename_key(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_)
read_in_q_k_v(lowerCamelCase_ ,lowerCamelCase_ ,base_model=lowerCamelCase_)
model.load_state_dict(lowerCamelCase_)
model.eval()
lowerCAmelCase__ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase__ : Union[str, Any] = Image.open(requests.get(lowerCamelCase_ ,stream=lowerCamelCase_).raw)
lowerCAmelCase__ : Tuple = ViTImageProcessor(
size=config.image_size ,image_mean=lowerCamelCase_ ,image_std=lowerCamelCase_)
lowerCAmelCase__ : int = image_processor(images=lowerCamelCase_ ,return_tensors='''pt''')
# forward pass
torch.manual_seed(2)
lowerCAmelCase__ : Optional[int] = model(**lowerCamelCase_)
lowerCAmelCase__ : Union[str, Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase__ : int = torch.tensor([[-1.0915, -1.4876, -1.1809]])
elif "b16" in checkpoint_url:
lowerCAmelCase__ : List[str] = torch.tensor([[14.2889, -18.9045, 11.7281]])
elif "l16" in checkpoint_url:
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[41.5028, -22.8681, 45.6475]])
elif "b4" in checkpoint_url:
lowerCAmelCase__ : Dict = torch.tensor([[-4.3868, 5.2932, -0.4137]])
else:
lowerCAmelCase__ : Optional[int] = torch.tensor([[-0.1792, -0.6465, 2.4263]])
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] ,lowerCamelCase_ ,atol=1E-4)
print(f"""Saving model to {pytorch_dump_folder_path}""")
model.save_pretrained(lowerCamelCase_)
print(f"""Saving image processor to {pytorch_dump_folder_path}""")
image_processor.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
__snake_case : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__snake_case : List[str] =parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 647 | 1 |
'''simple docstring'''
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def snake_case (UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[str]=0 ):
'''simple docstring'''
if name is None:
lowerCamelCase__ = None
else:
lowerCamelCase__ = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
lowerCamelCase__ = fmt.format(UpperCamelCase )
# Print and recurse (if needed).
if isinstance(UpperCamelCase , UpperCamelCase ):
if msg is not None:
print(UpperCamelCase )
for k in val.keys():
recursive_print(UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(UpperCamelCase , torch.Tensor ):
print(UpperCamelCase , """:""" , val.size() )
else:
print(UpperCamelCase , """:""" , UpperCamelCase )
def snake_case (UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Dict , UpperCamelCase : Any ):
'''simple docstring'''
lowerCamelCase__ = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCamelCase__ = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCamelCase__ = param.view(*UpperCamelCase )
lowerCamelCase__ = param.transpose(0 , 2 )
lowerCamelCase__ = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCamelCase__ = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCamelCase__ = param.view(*UpperCamelCase )
lowerCamelCase__ = param.transpose(0 , 1 ).contiguous()
lowerCamelCase__ = param.view(*UpperCamelCase )
return param
def snake_case (UpperCamelCase : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ = {}
# old versions did not store training args
lowerCamelCase__ = input_state_dict.get("""args""" , UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCamelCase__ = ds_args.padded_vocab_size
lowerCamelCase__ = ds_args.max_position_embeddings
lowerCamelCase__ = ds_args.hidden_size
lowerCamelCase__ = ds_args.num_layers
lowerCamelCase__ = ds_args.num_attention_heads
lowerCamelCase__ = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCamelCase__ = config.n_head
# The hidden_size per head.
lowerCamelCase__ = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCamelCase__ = input_state_dict["""checkpoint_version"""]
else:
lowerCamelCase__ = 0.0
# The model.
lowerCamelCase__ = input_state_dict["""model"""]
# The language model.
lowerCamelCase__ = model["""language_model"""]
# The embeddings.
lowerCamelCase__ = lm["""embedding"""]
# The word embeddings.
lowerCamelCase__ = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
lowerCamelCase__ = word_embeddings[: config.vocab_size, :]
lowerCamelCase__ = word_embeddings
# The position embeddings.
lowerCamelCase__ = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCamelCase__ = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCamelCase__ = pos_embeddings
# The transformer.
lowerCamelCase__ = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
lowerCamelCase__ = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
lowerCamelCase__ = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCamelCase__ = layer_re.match(UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCamelCase__ = int(m.group(1 ) )
# The name of the operation.
lowerCamelCase__ = m.group(2 )
# Is it a weight or a bias?
lowerCamelCase__ = m.group(3 )
# The name of the layer.
lowerCamelCase__ = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
lowerCamelCase__ = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
lowerCamelCase__ = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCamelCase__ = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCamelCase__ = torch.tensor(-1e4 , dtype=torch.floataa )
lowerCamelCase__ = masked_bias
lowerCamelCase__ = fix_query_key_value_ordering(UpperCamelCase , UpperCamelCase , 3 , UpperCamelCase , UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCamelCase__ = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCamelCase__ = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCamelCase__ = fix_query_key_value_ordering(UpperCamelCase , UpperCamelCase , 3 , UpperCamelCase , UpperCamelCase )
# Store. No change of shape.
lowerCamelCase__ = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCamelCase__ = megatron_to_transformers[op_name]
lowerCamelCase__ = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCamelCase__ = megatron_to_transformers[op_name]
lowerCamelCase__ = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCamelCase__ = transformer["""final_layernorm.weight"""]
lowerCamelCase__ = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
lowerCamelCase__ = word_embeddings
# It should be done!
return output_state_dict
def snake_case ():
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
lowerCamelCase__ = parser.parse_args()
# Extract the basename.
lowerCamelCase__ = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
lowerCamelCase__ = torch.load(UpperCamelCase , map_location="""cpu""" )
else:
lowerCamelCase__ = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
lowerCamelCase__ = input_state_dict.get("""args""" , UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCamelCase__ = """gelu_fast"""
elif ds_args.openai_gelu:
lowerCamelCase__ = """gelu_new"""
else:
lowerCamelCase__ = """gelu"""
else:
# in the very early days this used to be "gelu_new"
lowerCamelCase__ = """gelu_new"""
# Spell out all parameters in case the defaults change.
lowerCamelCase__ = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.0_2 , summary_type="""cls_index""" , summary_use_proj=UpperCamelCase , summary_activation=UpperCamelCase , summary_proj_to_labels=UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=UpperCamelCase , use_cache=UpperCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCamelCase__ = GPTaConfig.from_json_file(args.config_file )
lowerCamelCase__ = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
lowerCamelCase__ = convert_megatron_checkpoint(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(UpperCamelCase , UpperCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowerCamelCase__ = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCamelCase__ = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
lowerCamelCase__ = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowerCamelCase__ = """gpt2"""
lowerCamelCase__ = AutoTokenizer.from_pretrained(UpperCamelCase )
lowerCamelCase__ = type(UpperCamelCase ).__name__
lowerCamelCase__ = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(UpperCamelCase )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(UpperCamelCase )
# Store the state_dict to file.
lowerCamelCase__ = os.path.join(UpperCamelCase , """pytorch_model.bin""" )
print(f'''Saving checkpoint to \"{output_checkpoint_file}\"''' )
torch.save(UpperCamelCase , UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 705 |
import math
def snake_case (UpperCamelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case (UpperCamelCase : float = 0.1 ):
'''simple docstring'''
lowerCamelCase__ = 3
lowerCamelCase__ = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(UpperCamelCase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 235 | 0 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __a :
"""simple docstring"""
@staticmethod
def __A ( *_UpperCamelCase : Tuple ,**_UpperCamelCase : List[str] ) -> Tuple:
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class __a ( unittest.TestCase ):
"""simple docstring"""
_A : Any = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def __A ( self : int ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" )
SCREAMING_SNAKE_CASE__ =[
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def __A ( self : Dict ,_UpperCamelCase : List[str] ,_UpperCamelCase : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =vqa_pipeline(_UpperCamelCase ,top_k=1 )
self.assertEqual(
_UpperCamelCase ,[
[{"""score""": ANY(_UpperCamelCase ), """answer""": ANY(_UpperCamelCase )}],
[{"""score""": ANY(_UpperCamelCase ), """answer""": ANY(_UpperCamelCase )}],
] ,)
@require_torch
def __A ( self : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" )
SCREAMING_SNAKE_CASE__ ="""./tests/fixtures/tests_samples/COCO/000000039769.png"""
SCREAMING_SNAKE_CASE__ ="""How many cats are there?"""
SCREAMING_SNAKE_CASE__ =vqa_pipeline(image=_UpperCamelCase ,question="""How many cats are there?""" ,top_k=2 )
self.assertEqual(
_UpperCamelCase ,[{"""score""": ANY(_UpperCamelCase ), """answer""": ANY(_UpperCamelCase )}, {"""score""": ANY(_UpperCamelCase ), """answer""": ANY(_UpperCamelCase )}] )
SCREAMING_SNAKE_CASE__ =vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 )
self.assertEqual(
_UpperCamelCase ,[{"""score""": ANY(_UpperCamelCase ), """answer""": ANY(_UpperCamelCase )}, {"""score""": ANY(_UpperCamelCase ), """answer""": ANY(_UpperCamelCase )}] )
@slow
@require_torch
def __A ( self : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =pipeline("""visual-question-answering""" ,model="""dandelin/vilt-b32-finetuned-vqa""" )
SCREAMING_SNAKE_CASE__ ="""./tests/fixtures/tests_samples/COCO/000000039769.png"""
SCREAMING_SNAKE_CASE__ ="""How many cats are there?"""
SCREAMING_SNAKE_CASE__ =vqa_pipeline(image=_UpperCamelCase ,question=_UpperCamelCase ,top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase ,decimals=4 ) ,[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
SCREAMING_SNAKE_CASE__ =vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase ,decimals=4 ) ,[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
SCREAMING_SNAKE_CASE__ =vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] ,top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase ,decimals=4 ) ,[[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 ,)
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def __A ( self : int ) -> Tuple:
'''simple docstring'''
pass
| 151 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def UpperCAmelCase_ ( ):
SCREAMING_SNAKE_CASE__ =ArgumentParser("""Diffusers CLI tool""", usage="""diffusers-cli <command> [<args>]""" )
SCREAMING_SNAKE_CASE__ =parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(__UpperCamelCase )
# Let's go
SCREAMING_SNAKE_CASE__ =parser.parse_args()
if not hasattr(__UpperCamelCase, """func""" ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE__ =args.func(__UpperCamelCase )
service.run()
if __name__ == "__main__":
main()
| 151 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
UpperCamelCase__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def UpperCAmelCase__ ( _A ):
"""simple docstring"""
a_ = {}
with open(_A , '''r''' ) as file:
for line_number, line in enumerate(_A ):
a_ = line.strip()
if line:
a_ = line.split()
a_ = line_number
a_ = words[0]
a_ = value
return result
def UpperCAmelCase__ ( _A , _A , _A , _A , _A ):
"""simple docstring"""
for attribute in key.split('''.''' ):
a_ = getattr(_A , _A )
a_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_A ):
a_ = PARAM_MAPPING[full_name.split('''.''' )[-1]]
a_ = '''param'''
if weight_type is not None and weight_type != "param":
a_ = getattr(_A , _A ).shape
elif weight_type is not None and weight_type == "param":
a_ = hf_pointer
for attribute in hf_param_name.split('''.''' ):
a_ = getattr(_A , _A )
a_ = shape_pointer.shape
# let's reduce dimension
a_ = value[0]
else:
a_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
a_ = value
elif weight_type == "weight_g":
a_ = value
elif weight_type == "weight_v":
a_ = value
elif weight_type == "bias":
a_ = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
a_ = getattr(_A , _A )
a_ = value
else:
a_ = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCAmelCase__ ( _A , _A , _A , _A , _A ):
"""simple docstring"""
a_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_A ):
a_ = PARAM_MAPPING[full_name.split('''.''' )[-1]]
a_ = '''param'''
if weight_type is not None and weight_type != "param":
a_ = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a_ = '''.'''.join([key, hf_param_name] )
else:
a_ = key
a_ = value if '''lm_head''' in full_key else value[0]
UpperCamelCase__ = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def UpperCAmelCase__ ( _A , _A , _A=None , _A=None ):
"""simple docstring"""
a_ = False
for key, mapped_key in MAPPING.items():
a_ = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
a_ = True
if "*" in mapped_key:
a_ = name.split(_A )[0].split('''.''' )[-2]
a_ = mapped_key.replace('''*''' , _A )
if "weight_g" in name:
a_ = '''weight_g'''
elif "weight_v" in name:
a_ = '''weight_v'''
elif "bias" in name:
a_ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a_ = '''weight'''
else:
a_ = None
if hf_dict is not None:
rename_dict(_A , _A , _A , _A , _A )
else:
set_recursively(_A , _A , _A , _A , _A )
return is_used
return is_used
def UpperCAmelCase__ ( _A , _A , _A ):
"""simple docstring"""
a_ = []
a_ = fairseq_model.state_dict()
a_ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a_ = False
if "conv_layers" in name:
load_conv_layer(
_A , _A , _A , _A , hf_model.config.feat_extract_norm == '''group''' , )
a_ = True
else:
a_ = load_wavaveca_layer(_A , _A , _A )
if not is_used:
unused_weights.append(_A )
logger.warning(f"Unused weights: {unused_weights}" )
def UpperCAmelCase__ ( _A , _A , _A , _A , _A ):
"""simple docstring"""
a_ = full_name.split('''conv_layers.''' )[-1]
a_ = name.split('''.''' )
a_ = int(items[0] )
a_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
a_ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
a_ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
a_ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
a_ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_A )
@torch.no_grad()
def UpperCAmelCase__ ( _A , _A , _A=None , _A=None , _A=True , _A=False ):
"""simple docstring"""
if config_path is not None:
a_ = WavaVecaConfig.from_pretrained(_A )
else:
a_ = WavaVecaConfig()
if is_seq_class:
a_ = read_txt_into_dict(_A )
a_ = idalabel
a_ = WavaVecaForSequenceClassification(_A )
a_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , )
feature_extractor.save_pretrained(_A )
elif is_finetuned:
if dict_path:
a_ = Dictionary.load(_A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a_ = target_dict.pad_index
a_ = target_dict.bos_index
a_ = target_dict.eos_index
a_ = len(target_dict.symbols )
a_ = os.path.join(_A , '''vocab.json''' )
if not os.path.isdir(_A ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_A ) )
return
os.makedirs(_A , exist_ok=_A )
a_ = target_dict.indices
# fairseq has the <pad> and <s> switched
a_ = 0
a_ = 1
with open(_A , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_A , _A )
a_ = WavaVecaCTCTokenizer(
_A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_A , )
a_ = True if config.feat_extract_norm == '''layer''' else False
a_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , )
a_ = WavaVecaProcessor(feature_extractor=_A , tokenizer=_A )
processor.save_pretrained(_A )
a_ = WavaVecaForCTC(_A )
else:
a_ = WavaVecaForPreTraining(_A )
if is_finetuned or is_seq_class:
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
a_ = argparse.Namespace(task='''audio_pretraining''' )
a_ = fairseq.tasks.setup_task(_A )
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_A )
a_ = model[0].eval()
recursively_load_weights(_A , _A , not is_finetuned )
hf_wavavec.save_pretrained(_A )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 143 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
UpperCamelCase__ = logging.getLogger(__name__)
def UpperCAmelCase__ ( _A , _A ):
"""simple docstring"""
a_ = np.argmax(_A , axis=1 )
return np.sum(outputs == labels )
def UpperCAmelCase__ ( _A ):
"""simple docstring"""
with open(_A , encoding='''utf_8''' ) as f:
a_ = csv.reader(_A )
a_ = []
next(_A ) # skip the first line
for line in tqdm(_A ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def UpperCAmelCase__ ( _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
a_ = []
for dataset in encoded_datasets:
a_ = len(_A )
a_ = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
a_ = np.zeros((n_batch, 2) , dtype=np.intaa )
a_ = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
a_ = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(_A ):
a_ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
a_ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
a_ = with_conta
a_ = with_conta
a_ = len(_A ) - 1
a_ = len(_A ) - 1
a_ = with_conta
a_ = with_conta
a_ = mc_label
a_ = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_A ) for t in all_inputs ) )
return tensor_datasets
def UpperCAmelCase__ ( ):
"""simple docstring"""
a_ = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=_A , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=_A , type=_A , required=_A , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=_A , default='''''' )
parser.add_argument('''--eval_dataset''' , type=_A , default='''''' )
parser.add_argument('''--seed''' , type=_A , default=42 )
parser.add_argument('''--num_train_epochs''' , type=_A , default=3 )
parser.add_argument('''--train_batch_size''' , type=_A , default=8 )
parser.add_argument('''--eval_batch_size''' , type=_A , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=_A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=_A , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=_A , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_A , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=_A , default=6.2_5e-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=_A , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=_A , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=_A , default=0.01 )
parser.add_argument('''--lm_coef''' , type=_A , default=0.9 )
parser.add_argument('''--n_valid''' , type=_A , default=374 )
parser.add_argument('''--server_ip''' , type=_A , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_A , default='''''' , help='''Can be used for distant debugging.''' )
a_ = parser.parse_args()
print(_A )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_A )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
a_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
a_ = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_A , _A ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
a_ = ['''_start_''', '''_delimiter_''', '''_classify_''']
a_ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_A )
a_ = tokenizer.convert_tokens_to_ids(_A )
a_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_A ) )
model.to(_A )
# Load and encode the datasets
def tokenize_and_encode(_A ):
if isinstance(_A , _A ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_A ) )
elif isinstance(_A , _A ):
return obj
return [tokenize_and_encode(_A ) for o in obj]
logger.info('''Encoding dataset...''' )
a_ = load_rocstories_dataset(args.train_dataset )
a_ = load_rocstories_dataset(args.eval_dataset )
a_ = (train_dataset, eval_dataset)
a_ = tokenize_and_encode(_A )
# Compute the max input length for the Transformer
a_ = model.config.n_positions // 2 - 2
a_ = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
a_ = min(_A , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
a_ = pre_process_datasets(_A , _A , _A , *_A )
a_ , a_ = tensor_datasets[0], tensor_datasets[1]
a_ = TensorDataset(*_A )
a_ = RandomSampler(_A )
a_ = DataLoader(_A , sampler=_A , batch_size=args.train_batch_size )
a_ = TensorDataset(*_A )
a_ = SequentialSampler(_A )
a_ = DataLoader(_A , sampler=_A , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
a_ = args.max_steps
a_ = args.max_steps // (len(_A ) // args.gradient_accumulation_steps) + 1
else:
a_ = len(_A ) // args.gradient_accumulation_steps * args.num_train_epochs
a_ = list(model.named_parameters() )
a_ = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
a_ = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
a_ = AdamW(_A , lr=args.learning_rate , eps=args.adam_epsilon )
a_ = get_linear_schedule_with_warmup(
_A , num_warmup_steps=args.warmup_steps , num_training_steps=_A )
if args.do_train:
a_ , a_ , a_ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
a_ = 0
a_ = 0
a_ = tqdm(_A , desc='''Training''' )
for step, batch in enumerate(_A ):
a_ = tuple(t.to(_A ) for t in batch )
a_ , a_ , a_ , a_ = batch
a_ = model(_A , mc_token_ids=_A , lm_labels=_A , mc_labels=_A )
a_ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
a_ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
a_ = '''Training loss: {:.2e} lr: {:.2e}'''.format(_A , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
a_ = model.module if hasattr(_A , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
a_ = os.path.join(args.output_dir , _A )
a_ = os.path.join(args.output_dir , _A )
torch.save(model_to_save.state_dict() , _A )
model_to_save.config.to_json_file(_A )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
a_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
a_ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_A )
if args.do_eval:
model.eval()
a_ , a_ = 0, 0
a_ , a_ = 0, 0
for batch in tqdm(_A , desc='''Evaluating''' ):
a_ = tuple(t.to(_A ) for t in batch )
a_ , a_ , a_ , a_ = batch
with torch.no_grad():
a_ , a_ , a_ , a_ = model(
_A , mc_token_ids=_A , lm_labels=_A , mc_labels=_A )
a_ = mc_logits.detach().cpu().numpy()
a_ = mc_labels.to('''cpu''' ).numpy()
a_ = accuracy(_A , _A )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
a_ = eval_loss / nb_eval_steps
a_ = eval_accuracy / nb_eval_examples
a_ = tr_loss / nb_tr_steps if args.do_train else None
a_ = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
a_ = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(_A , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _A , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 143 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __UpperCamelCase ):
'''simple docstring'''
_lowerCamelCase: Any = """upernet"""
def __init__( self : int ,A_ : List[Any]=None ,A_ : Optional[Any]=512 ,A_ : str=0.02 ,A_ : int=[1, 2, 3, 6] ,A_ : Any=True ,A_ : str=0.4 ,A_ : List[str]=384 ,A_ : Optional[int]=256 ,A_ : Dict=1 ,A_ : List[Any]=False ,A_ : Optional[int]=255 ,**A_ : Any ,) -> List[str]:
super().__init__(**A_ )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
A = CONFIG_MAPPING["""resnet"""](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(A_ ,A_ ):
A = backbone_config.get('model_type' )
A = CONFIG_MAPPING[backbone_model_type]
A = config_class.from_dict(A_ )
A = backbone_config
A = hidden_size
A = initializer_range
A = pool_scales
A = use_auxiliary_head
A = auxiliary_loss_weight
A = auxiliary_in_channels
A = auxiliary_channels
A = auxiliary_num_convs
A = auxiliary_concat_input
A = loss_ignore_index
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
A = copy.deepcopy(self.__dict__ )
A = self.backbone_config.to_dict()
A = self.__class__.model_type
return output
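# A minimal usage sketch for the config above (hedged: this block appears to be
# an obfuscated copy of transformers' UperNetConfig, so the real upstream names
# are used here instead of the mangled ones):
if __name__ == "__main__":
    from transformers import ConvNextConfig, UperNetConfig

    backbone = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(backbone_config=backbone, hidden_size=512)
    print(config.to_dict()["backbone_config"]["model_type"])  # -> "convnext"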
| 91
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a :
def __init__( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : str=32 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : List[Any]=10 , UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] , UpperCAmelCase : Any=[1, 1, 2, 1] , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : str="relu" , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Optional[Any]=None , ):
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : List[Any] = batch_size
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : Dict = num_channels
lowerCAmelCase_ : List[str] = embeddings_size
lowerCAmelCase_ : Union[str, Any] = hidden_sizes
lowerCAmelCase_ : List[str] = depths
lowerCAmelCase_ : int = is_training
lowerCAmelCase_ : Optional[Any] = use_labels
lowerCAmelCase_ : Optional[int] = hidden_act
lowerCAmelCase_ : Optional[int] = num_labels
lowerCAmelCase_ : List[str] = scope
lowerCAmelCase_ : Any = len(UpperCAmelCase )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : List[str] = None
if self.use_labels:
lowerCAmelCase_ : Any = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def A ( self : int , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Optional[int] = TFRegNetModel(config=UpperCAmelCase )
lowerCAmelCase_ : List[Any] = model(UpperCAmelCase , training=UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Union[str, Any] = self.num_labels
lowerCAmelCase_ : List[str] = TFRegNetForImageClassification(UpperCAmelCase )
lowerCAmelCase_ : Dict = model(UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : str = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = config_and_inputs
lowerCAmelCase_ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : List[Any] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
__snake_case : Optional[int] = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
__snake_case : Optional[int] = False
__snake_case : Union[str, Any] = False
__snake_case : Any = False
__snake_case : Optional[Any] = False
__snake_case : str = False
def A ( self : Dict ):
lowerCAmelCase_ : Tuple = TFRegNetModelTester(self )
lowerCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def A ( self : str ):
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def A ( self : Optional[int] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def A ( self : Dict ):
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def A ( self : Any ):
pass
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Dict = model_class(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : Dict = [*signature.parameters.keys()]
lowerCAmelCase_ : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A ( self : List[str] ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : List[str] ):
def check_hidden_states_output(UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] ):
lowerCAmelCase_ : Dict = model_class(UpperCAmelCase )
lowerCAmelCase_ : Tuple = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) , training=UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Union[str, Any] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase_ : List[Any] = layer_type
lowerCAmelCase_ : Any = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : Optional[int] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A ( self : Optional[int] ):
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int={} ):
lowerCAmelCase_ : str = model(UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : int = model(UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase ).to_tuple()
def recursive_check(UpperCAmelCase : Tuple , UpperCAmelCase : int ):
if isinstance(UpperCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase , UpperCAmelCase ):
recursive_check(UpperCAmelCase , UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(UpperCAmelCase , UpperCAmelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
) , )
recursive_check(UpperCAmelCase , UpperCAmelCase )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = model_class(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : str = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
lowerCAmelCase_ : List[str] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Tuple = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Tuple = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {"""output_hidden_states""": True} )
lowerCAmelCase_ : Any = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
lowerCAmelCase_ : List[str] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {"""output_hidden_states""": True} )
def A ( self : Any ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def A ( self : str ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : List[Any] = TFRegNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __UpperCamelCase ( ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def A ( self : int ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self : Dict ):
lowerCAmelCase_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase_ : List[Any] = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Union[str, Any] = image_processor(images=UpperCAmelCase , return_tensors="""tf""" )
# forward pass
lowerCAmelCase_ : List[str] = model(**UpperCAmelCase , training=UpperCAmelCase )
# verify the logits
lowerCAmelCase_ : Any = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCAmelCase_ : Dict = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 )
| 600
| 0
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
    layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_28, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55)
training_set = train_datagen.flow_from_directory(
    'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
test_set = test_datagen.flow_from_directory(
    'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
test_image = tf.keras.preprocessing.image.load_img(
    'dataset/single_prediction/image.png', target_size=(64, 64)
)
test_image = tf.keras.preprocessing.image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
    prediction = 'Normal'
if result[0][0] == 1:
    prediction = 'Abnormality detected'
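# Note: classifier.predict returns a sigmoid probability in [0, 1], so the exact
# comparisons above only fire on fully saturated outputs. A more robust sketch
# (hedged: the 0.5 cutoff is an illustrative choice, not part of the original):
prediction = 'Abnormality detected' if result[0][0] >= 0.5 else 'Normal'
print(prediction)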
| 710
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> str:
debug_launcher(test_script.main)
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
debug_launcher(test_ops.main)
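# A minimal usage sketch for debug_launcher (hedged: per accelerate's testing
# utilities it runs the given function in `num_processes` CPU processes so that
# distributed code paths can be exercised without GPUs; the toy function below
# is illustrative):
#
#   def _toy_main():
#       from accelerate import Accelerator
#       accelerator = Accelerator()
#       print(f"process {accelerator.process_index} of {accelerator.num_processes}")
#
#   debug_launcher(_toy_main, num_processes=2)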
| 444
| 0
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase__ ( lowercase__ : Optional[int] , lowercase__ : str , lowercase__ : Dict = 1 / sqrt(2 ) ):
snake_case : List[str] = tau * frequency / samplerate
snake_case : Any = sin(lowercase__ )
snake_case : Tuple = cos(lowercase__ )
snake_case : str = _sin / (2 * q_factor)
snake_case : List[Any] = (1 - _cos) / 2
snake_case : Dict = 1 - _cos
snake_case : Optional[int] = 1 + alpha
snake_case : List[Any] = -2 * _cos
snake_case : Tuple = 1 - alpha
snake_case : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase__ ( lowercase__ : Union[str, Any] , lowercase__ : int , lowercase__ : Dict = 1 / sqrt(2 ) ):
snake_case : Optional[Any] = tau * frequency / samplerate
snake_case : List[str] = sin(lowercase__ )
snake_case : Optional[int] = cos(lowercase__ )
snake_case : Union[str, Any] = _sin / (2 * q_factor)
snake_case : Any = (1 + _cos) / 2
snake_case : Any = -1 - _cos
snake_case : List[str] = 1 + alpha
snake_case : str = -2 * _cos
snake_case : Optional[int] = 1 - alpha
snake_case : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase__ ( lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Dict = 1 / sqrt(2 ) ):
snake_case : List[Any] = tau * frequency / samplerate
snake_case : Optional[int] = sin(lowercase__ )
snake_case : str = cos(lowercase__ )
snake_case : Optional[Any] = _sin / (2 * q_factor)
snake_case : int = _sin / 2
snake_case : Union[str, Any] = 0
snake_case : int = -ba
snake_case : List[str] = 1 + alpha
snake_case : Any = -2 * _cos
snake_case : Dict = 1 - alpha
snake_case : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase__ ( lowercase__ : Dict , lowercase__ : List[Any] , lowercase__ : Any = 1 / sqrt(2 ) ):
snake_case : int = tau * frequency / samplerate
snake_case : Any = sin(lowercase__ )
snake_case : List[str] = cos(lowercase__ )
snake_case : str = _sin / (2 * q_factor)
snake_case : Union[str, Any] = 1 - alpha
snake_case : Any = -2 * _cos
snake_case : int = 1 + alpha
snake_case : str = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def UpperCamelCase__ ( lowercase__ : int , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] , lowercase__ : List[str] = 1 / sqrt(2 ) , ):
snake_case : Optional[int] = tau * frequency / samplerate
snake_case : str = sin(lowercase__ )
snake_case : Any = cos(lowercase__ )
snake_case : Dict = _sin / (2 * q_factor)
snake_case : str = 10 ** (gain_db / 40)
snake_case : List[str] = 1 + alpha * big_a
snake_case : Dict = -2 * _cos
snake_case : int = 1 - alpha * big_a
snake_case : Tuple = 1 + alpha / big_a
snake_case : Tuple = -2 * _cos
snake_case : List[str] = 1 - alpha / big_a
snake_case : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase__ ( lowercase__ : int , lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict = 1 / sqrt(2 ) , ):
snake_case : List[str] = tau * frequency / samplerate
snake_case : Optional[int] = sin(lowercase__ )
snake_case : Dict = cos(lowercase__ )
snake_case : Dict = _sin / (2 * q_factor)
snake_case : Optional[int] = 10 ** (gain_db / 40)
snake_case : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
snake_case : int = (big_a + 1) + (big_a - 1) * _cos
snake_case : List[Any] = (big_a - 1) - (big_a + 1) * _cos
snake_case : Any = (big_a - 1) + (big_a + 1) * _cos
snake_case : List[Any] = 2 * sqrt(lowercase__ ) * alpha
snake_case : str = big_a * (pmc + aaa)
snake_case : Dict = 2 * big_a * mpc
snake_case : Union[str, Any] = big_a * (pmc - aaa)
snake_case : Optional[Any] = ppmc + aaa
snake_case : Tuple = -2 * pmpc
snake_case : List[Any] = ppmc - aaa
snake_case : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase__ ( lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] = 1 / sqrt(2 ) , ):
snake_case : Tuple = tau * frequency / samplerate
snake_case : int = sin(lowercase__ )
snake_case : List[Any] = cos(lowercase__ )
snake_case : Any = _sin / (2 * q_factor)
snake_case : Optional[int] = 10 ** (gain_db / 40)
snake_case : int = (big_a + 1) - (big_a - 1) * _cos
snake_case : Dict = (big_a + 1) + (big_a - 1) * _cos
snake_case : Any = (big_a - 1) - (big_a + 1) * _cos
snake_case : List[str] = (big_a - 1) + (big_a + 1) * _cos
snake_case : Union[str, Any] = 2 * sqrt(lowercase__ ) * alpha
snake_case : Dict = big_a * (ppmc + aaa)
snake_case : Optional[Any] = -2 * big_a * pmpc
snake_case : Tuple = big_a * (ppmc - aaa)
snake_case : Optional[Any] = pmc + aaa
snake_case : Optional[Any] = 2 * mpc
snake_case : Dict = pmc - aaa
snake_case : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
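# A minimal usage sketch for the biquad builders above (hedged: the obfuscated
# helpers appear to mirror make_lowpass / make_highpass / make_bandpass /
# make_allpass / make_peak / make_lowshelf / make_highshelf from TheAlgorithms'
# audio_filters package, and IIRFilter is assumed to expose a per-sample
# process() method as it does upstream):
#
#   filt = make_lowpass(frequency=1_000, samplerate=48_000)
#   impulse = [1.0] + [0.0] * 7
#   response = [filt.process(sample) for sample in impulse]  # first taps of h[n]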
| 134
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a: int = logging.get_logger(__name__)
_a: Optional[Any] = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class __UpperCamelCase ( lowercase ):
SCREAMING_SNAKE_CASE__ = 'deformable_detr'
SCREAMING_SNAKE_CASE__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[int] , lowerCAmelCase : Any=True , lowerCAmelCase : str=None , lowerCAmelCase : List[str]=3 , lowerCAmelCase : Tuple=300 , lowerCAmelCase : List[str]=1_024 , lowerCAmelCase : Any=6 , lowerCAmelCase : str=1_024 , lowerCAmelCase : Any=8 , lowerCAmelCase : Dict=6 , lowerCAmelCase : Any=1_024 , lowerCAmelCase : Dict=8 , lowerCAmelCase : str=0.0 , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[int]="relu" , lowerCAmelCase : List[str]=256 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : str=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : Union[str, Any]=0.02 , lowerCAmelCase : List[str]=1.0 , lowerCAmelCase : Dict=True , lowerCAmelCase : int=False , lowerCAmelCase : Tuple="sine" , lowerCAmelCase : Optional[Any]="resnet50" , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : Dict=4 , lowerCAmelCase : int=4 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : int=False , lowerCAmelCase : Any=300 , lowerCAmelCase : Dict=False , lowerCAmelCase : Any=1 , lowerCAmelCase : Tuple=5 , lowerCAmelCase : str=2 , lowerCAmelCase : int=1 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Any=5 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[Any]=0.25 , lowerCAmelCase : List[str]=False , **lowerCAmelCase : Union[str, Any] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = backbone_config.get("model_type" )
UpperCAmelCase_ = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ = config_class.from_dict(lowerCAmelCase )
UpperCAmelCase_ = use_timm_backbone
UpperCAmelCase_ = backbone_config
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = num_queries
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = d_model
UpperCAmelCase_ = encoder_ffn_dim
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = encoder_attention_heads
UpperCAmelCase_ = decoder_ffn_dim
UpperCAmelCase_ = decoder_layers
UpperCAmelCase_ = decoder_attention_heads
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = init_std
UpperCAmelCase_ = init_xavier_std
UpperCAmelCase_ = encoder_layerdrop
UpperCAmelCase_ = auxiliary_loss
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = backbone
UpperCAmelCase_ = use_pretrained_backbone
UpperCAmelCase_ = dilation
# deformable attributes
UpperCAmelCase_ = num_feature_levels
UpperCAmelCase_ = encoder_n_points
UpperCAmelCase_ = decoder_n_points
UpperCAmelCase_ = two_stage
UpperCAmelCase_ = two_stage_num_proposals
UpperCAmelCase_ = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
UpperCAmelCase_ = class_cost
UpperCAmelCase_ = bbox_cost
UpperCAmelCase_ = giou_cost
# Loss coefficients
UpperCAmelCase_ = mask_loss_coefficient
UpperCAmelCase_ = dice_loss_coefficient
UpperCAmelCase_ = bbox_loss_coefficient
UpperCAmelCase_ = giou_loss_coefficient
UpperCAmelCase_ = eos_coefficient
UpperCAmelCase_ = focal_alpha
UpperCAmelCase_ = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCAmelCase , **lowerCAmelCase )
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self : Any ):
'''simple docstring'''
return self.d_model
def __A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
UpperCAmelCase_ = self.backbone_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
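# A minimal usage sketch for the config above (hedged: it mirrors transformers'
# DeformableDetrConfig, so the upstream class name is used here):
if __name__ == "__main__":
    from transformers import DeformableDetrConfig

    config = DeformableDetrConfig(num_queries=100, two_stage=False)
    # attribute_map aliases: hidden_size -> d_model, num_attention_heads -> encoder_attention_heads
    print(config.hidden_size, config.num_attention_heads)  # -> 256 8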
| 162
| 0
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
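# Worked example for the two activations above (hedged: names follow the
# de-obfuscated upstream file; swish/SiLU is x * sigmoid(x)):
#   sigmoid(0) = 1 / (1 + e^0) = 0.5, so sigmoid_linear_unit(0) = 0 * 0.5 = 0.0
#   >>> sigmoid(np.array([0.0, 1.0]))
#   array([0.5       , 0.73105858])
#   >>> sigmoid_linear_unit(np.array([0.0, 1.0]))
#   array([0.        , 0.73105858])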
| 707
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def lowercase__( __UpperCamelCase: bytes ,__UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = f"{sampling_rate}"
SCREAMING_SNAKE_CASE : str = '1'
SCREAMING_SNAKE_CASE : Optional[Any] = 'f32le'
SCREAMING_SNAKE_CASE : Any = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(__UpperCamelCase ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(__UpperCamelCase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
SCREAMING_SNAKE_CASE : Union[str, Any] = output_stream[0]
SCREAMING_SNAKE_CASE : Dict = np.frombuffer(__UpperCamelCase ,np.float32 )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: float ,__UpperCamelCase: str = "f32le" ,):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = f"{sampling_rate}"
SCREAMING_SNAKE_CASE : str = '1'
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : Optional[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Optional[int] = 4
else:
raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
SCREAMING_SNAKE_CASE : Optional[Any] = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE : List[str] = 'alsa'
SCREAMING_SNAKE_CASE : str = 'default'
elif system == "Darwin":
SCREAMING_SNAKE_CASE : Dict = 'avfoundation'
SCREAMING_SNAKE_CASE : int = ':0'
elif system == "Windows":
SCREAMING_SNAKE_CASE : str = 'dshow'
SCREAMING_SNAKE_CASE : Any = 'default'
SCREAMING_SNAKE_CASE : Any = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = _ffmpeg_stream(__UpperCamelCase ,__UpperCamelCase )
for item in iterator:
yield item
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: float ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[Union[Tuple[float, float], float]] = None ,__UpperCamelCase: str = "f32le" ,):
"""simple docstring"""
if stream_chunk_s is not None:
SCREAMING_SNAKE_CASE : Any = stream_chunk_s
else:
SCREAMING_SNAKE_CASE : Dict = chunk_length_s
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_microphone(__UpperCamelCase ,__UpperCamelCase ,format_for_conversion=__UpperCamelCase )
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : Optional[int] = np.int16
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Optional[int] = np.float32
SCREAMING_SNAKE_CASE : List[Any] = 4
else:
raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
if stride_length_s is None:
SCREAMING_SNAKE_CASE : List[str] = chunk_length_s / 6
SCREAMING_SNAKE_CASE : Optional[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__UpperCamelCase ,(int, float) ):
SCREAMING_SNAKE_CASE : str = [stride_length_s, stride_length_s]
SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now()
SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=__UpperCamelCase )
for item in chunk_bytes_iter(__UpperCamelCase ,__UpperCamelCase ,stride=(stride_left, stride_right) ,stream=__UpperCamelCase ):
# Put everything back in numpy scale
SCREAMING_SNAKE_CASE : List[Any] = np.frombuffer(item['raw'] ,dtype=__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[str] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
SCREAMING_SNAKE_CASE : Any = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Tuple[int, int] ,__UpperCamelCase: bool = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = B''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for raw in iterator:
acc += raw
if stream and len(__UpperCamelCase ) < chunk_len:
SCREAMING_SNAKE_CASE : Tuple = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__UpperCamelCase ) >= chunk_len:
# We are flushing the accumulator
SCREAMING_SNAKE_CASE : Optional[int] = (_stride_left, stride_right)
SCREAMING_SNAKE_CASE : List[str] = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
SCREAMING_SNAKE_CASE : Optional[int] = False
yield item
SCREAMING_SNAKE_CASE : List[Any] = stride_left
SCREAMING_SNAKE_CASE : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__UpperCamelCase ) > stride_left:
SCREAMING_SNAKE_CASE : Tuple = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
SCREAMING_SNAKE_CASE : List[Any] = False
yield item
def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 2**24 # 16Mo
try:
with subprocess.Popen(__UpperCamelCase ,stdout=subprocess.PIPE ,bufsize=__UpperCamelCase ) as ffmpeg_process:
while True:
SCREAMING_SNAKE_CASE : Any = ffmpeg_process.stdout.read(__UpperCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
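# A minimal usage sketch for the streaming helpers above (hedged: the obfuscated
# functions appear to mirror transformers.pipelines.audio_utils, i.e.
# ffmpeg_read / ffmpeg_microphone / ffmpeg_microphone_live / chunk_bytes_iter;
# real microphone capture requires a working ffmpeg install):
#
#   from transformers.pipelines.audio_utils import ffmpeg_microphone_live
#
#   for chunk in ffmpeg_microphone_live(sampling_rate=16_000, chunk_length_s=2.0):
#       waveform = chunk["raw"]          # np.float32 samples for this window
#       left, right = chunk["stride"]    # overlap (in samples) with neighbours
#       ...                              # feed `waveform` to an ASR pipeline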
| 508
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase )
class lowerCamelCase_ ( lowerCamelCase ):
a__ = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
a__ = Features({'''text''': Value('''string''' )} )
a__ = Features({} )
a__ = "text"
@property
def A ( self ):
"""simple docstring"""
return {self.text_column: "text"}
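# A minimal usage sketch (hedged: this appears to be datasets' LanguageModeling
# task template, a now-deprecated API; the column name below is illustrative):
#
#   from datasets.tasks import LanguageModeling
#
#   template = LanguageModeling(text_column="content")
#   template.column_mapping  # -> {"content": "text"}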
| 0
|
import argparse
import os
import re
import packaging.version
_UpperCAmelCase = """examples/"""
_UpperCAmelCase = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_UpperCAmelCase = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_UpperCAmelCase = """README.md"""
def UpperCamelCase ( __lowercase : Dict ,__lowercase : Any ,__lowercase : Tuple ):
'''simple docstring'''
with open(__lowercase ,'r' ,encoding='utf-8' ,newline='\n' ) as f:
A_ : Dict = f.read()
A_ , A_ : Dict = REPLACE_PATTERNS[pattern]
A_ : List[str] = replace.replace('VERSION' ,__lowercase )
A_ : int = re_pattern.sub(__lowercase ,__lowercase )
with open(__lowercase ,'w' ,encoding='utf-8' ,newline='\n' ) as f:
f.write(__lowercase )
def UpperCamelCase ( __lowercase : Union[str, Any] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__lowercase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(__lowercase ,__lowercase ) ,__lowercase ,pattern='examples' )
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : Tuple=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowercase ,__lowercase ,__lowercase )
if not patch:
update_version_in_examples(__lowercase )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Optional[int] = '🤗 Transformers currently provides the following architectures'
A_ : str = '1. Want to contribute a new model?'
with open(__lowercase ,'r' ,encoding='utf-8' ,newline='\n' ) as f:
A_ : int = f.readlines()
# Find the start of the list.
A_ : List[str] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
A_ : List[str] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
A_ : str = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' ,'https://huggingface.co/docs/transformers/model_doc' ,)
index += 1
with open(__lowercase ,'w' ,encoding='utf-8' ,newline='\n' ) as f:
f.writelines(__lowercase )
def UpperCamelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES['init'] ,'r' ) as f:
A_ : Any = f.read()
A_ : Union[str, Any] = REPLACE_PATTERNS['init'][0].search(__lowercase ).groups()[0]
return packaging.version.parse(__lowercase )
def UpperCamelCase ( __lowercase : Tuple=False ):
'''simple docstring'''
A_ : List[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
A_ : Any = default_version.base_version
elif patch:
A_ : str = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
A_ : List[Any] = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
A_ : Dict = input(f'''Which version are you releasing? [{default_version}]''' )
if len(__lowercase ) == 0:
A_ : Union[str, Any] = default_version
print(f'''Updating version to {version}.''' )
global_version_update(__lowercase ,patch=__lowercase )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Union[str, Any] = get_version()
A_ : List[str] = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
A_ : List[str] = current_version.base_version
# Check with the user we got that right.
A_ : str = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(__lowercase ) == 0:
A_ : Dict = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(__lowercase )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_UpperCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
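# A quick demonstration of the REPLACE_PATTERNS machinery referenced above
# (hedged: the sample line is illustrative). update_version_in_file substitutes
# the literal VERSION placeholder first, then applies the compiled pattern:
#
#   sample = '__version__ = "4.30.0.dev0"\n'
#   re_pattern, replacement = REPLACE_PATTERNS["init"]
#   re_pattern.sub(replacement.replace("VERSION", "4.30.1"), sample)
#   # -> '__version__ = "4.30.1"\n'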
| 558
| 0
|
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class __magic_name__ ( unittest.TestCase ):
def __snake_case ( self : Any ):
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
        with self.assertRaises(AssertionError ):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , )
self.assertFalse(
is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
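# Why stepping by 6 from 5 suffices: every integer is of the form 6k, 6k±1,
# 6k±2, or 6k+3; 6k and 6k±2 are even and 6k+3 is divisible by 3, so after the
# explicit 2/3 checks only candidates i = 6k - 1 and i + 2 = 6k + 1 up to
# sqrt(number) remain. Quick sanity check (hedged, illustrative):
#   >>> [n for n in range(2, 30) if is_prime(n)]
#   [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]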
| 475
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class __magic_name__ ( __UpperCAmelCase ):
__A : Tuple = "deit"
def __init__( self : List[str] , snake_case__ : int=7_6_8 , snake_case__ : List[Any]=1_2 , snake_case__ : Dict=1_2 , snake_case__ : List[Any]=3_0_7_2 , snake_case__ : Optional[int]="gelu" , snake_case__ : Dict=0.0 , snake_case__ : str=0.0 , snake_case__ : List[str]=0.02 , snake_case__ : Optional[int]=1e-1_2 , snake_case__ : Any=2_2_4 , snake_case__ : Optional[int]=1_6 , snake_case__ : int=3 , snake_case__ : str=True , snake_case__ : Optional[int]=1_6 , **snake_case__ : List[Any] , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase :List[str] = hidden_size
lowercase :Union[str, Any] = num_hidden_layers
lowercase :Optional[Any] = num_attention_heads
lowercase :Optional[Any] = intermediate_size
lowercase :int = hidden_act
lowercase :int = hidden_dropout_prob
lowercase :Dict = attention_probs_dropout_prob
lowercase :Optional[int] = initializer_range
lowercase :Tuple = layer_norm_eps
lowercase :Optional[int] = image_size
lowercase :Union[str, Any] = patch_size
lowercase :Optional[Any] = num_channels
lowercase :Tuple = qkv_bias
lowercase :Optional[Any] = encoder_stride
class __magic_name__ ( __UpperCAmelCase ):
__A : Optional[int] = version.parse("1.11" )
@property
def __snake_case ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
return 1e-4
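# A minimal export sketch for the ONNX config above (hedged: this mirrors the
# transformers.onnx export CLI, which requires the optional onnx extras):
#
#   python -m transformers.onnx --model=facebook/deit-base-distilled-patch16-224 onnx/
#
# The `inputs` property above declares dynamic batch/num_channels/height/width
# axes for `pixel_values`, and 1e-4 is the absolute tolerance used when
# validating the exported graph against the PyTorch model.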
| 475
| 1
|
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def UpperCAmelCase ( snake_case : int ):
random.seed(snake_case )
np.random.seed(snake_case )
torch.manual_seed(snake_case )
torch.cuda.manual_seed_all(snake_case )
# ^^ safe to call this function even if cuda is not available
class a__ :
def __init__( self : Union[str, Any] ,a__ : Iterable[torch.nn.Parameter] ,a__ : float = 0.9999 ,a__ : float = 0.0 ,a__ : int = 0 ,a__ : bool = False ,a__ : Union[float, int] = 1.0 ,a__ : Union[float, int] = 2 / 3 ,a__ : Optional[Any] = None ,a__ : Dict[str, Any] = None ,**a__ : Any ,) -> List[Any]:
"""simple docstring"""
if isinstance(a__ ,torch.nn.Module):
_lowerCAmelCase:Tuple = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' ,'''1.0.0''' ,a__ ,standard_warn=a__ ,)
_lowerCAmelCase:str = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCAmelCase:Optional[int] = True
if kwargs.get('''max_value''' ,a__) is not None:
_lowerCAmelCase:Tuple = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' ,'''1.0.0''' ,a__ ,standard_warn=a__)
_lowerCAmelCase:Dict = kwargs['''max_value''']
if kwargs.get('''min_value''' ,a__) is not None:
_lowerCAmelCase:List[str] = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' ,'''1.0.0''' ,a__ ,standard_warn=a__)
_lowerCAmelCase:Any = kwargs['''min_value''']
_lowerCAmelCase:Any = list(a__)
_lowerCAmelCase:Any = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' ,a__) is not None:
_lowerCAmelCase:Union[str, Any] = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' ,'''1.0.0''' ,a__ ,standard_warn=a__)
self.to(device=kwargs['''device'''])
_lowerCAmelCase:int = None
_lowerCAmelCase:int = decay
_lowerCAmelCase:List[str] = min_decay
_lowerCAmelCase:Tuple = update_after_step
_lowerCAmelCase:List[str] = use_ema_warmup
_lowerCAmelCase:Optional[Any] = inv_gamma
_lowerCAmelCase:int = power
_lowerCAmelCase:Optional[int] = 0
_lowerCAmelCase:int = None # set in `step()`
_lowerCAmelCase:int = model_cls
_lowerCAmelCase:Union[str, Any] = model_config
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] ,a__ : List[str] ,a__ : List[str]) -> "EMAModel":
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:str = model_cls.load_config(a__ ,return_unused_kwargs=a__)
_lowerCAmelCase:Dict = model_cls.from_pretrained(a__)
_lowerCAmelCase:Tuple = cls(model.parameters() ,model_cls=a__ ,model_config=model.config)
ema_model.load_state_dict(a__)
return ema_model
def __UpperCamelCase ( self : str ,a__ : Optional[int]) -> Tuple:
"""simple docstring"""
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''')
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''')
_lowerCAmelCase:Optional[Any] = self.model_cls.from_config(self.model_config)
_lowerCAmelCase:int = self.state_dict()
state_dict.pop('''shadow_params''' ,a__)
model.register_to_config(**a__)
self.copy_to(model.parameters())
model.save_pretrained(a__)
def __UpperCamelCase ( self : int ,a__ : int) -> float:
"""simple docstring"""
_lowerCAmelCase:Tuple = max(0 ,optimization_step - self.update_after_step - 1)
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCAmelCase:List[str] = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCAmelCase:List[str] = (1 + step) / (10 + step)
_lowerCAmelCase:Optional[Any] = min(a__ ,self.decay)
# make sure decay is not smaller than min_decay
_lowerCAmelCase:int = max(a__ ,self.min_decay)
return cur_decay_value
@torch.no_grad()
def __UpperCamelCase ( self : Any ,a__ : Iterable[torch.nn.Parameter]) -> List[Any]:
"""simple docstring"""
if isinstance(a__ ,torch.nn.Module):
_lowerCAmelCase:Any = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' ,'''1.0.0''' ,a__ ,standard_warn=a__ ,)
_lowerCAmelCase:Any = parameters.parameters()
_lowerCAmelCase:Tuple = list(a__)
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCAmelCase:Optional[Any] = self.get_decay(self.optimization_step)
_lowerCAmelCase:Any = decay
_lowerCAmelCase:List[str] = 1 - decay
_lowerCAmelCase:List[str] = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params ,a__):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
_lowerCAmelCase:List[Any] = deepspeed.zero.GatheredParameters(a__ ,modifier_rank=a__)
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param))
else:
s_param.copy_(a__)
def __UpperCamelCase ( self : Tuple ,a__ : Iterable[torch.nn.Parameter]) -> None:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = list(a__)
for s_param, param in zip(self.shadow_params ,a__):
param.data.copy_(s_param.to(param.device).data)
def __UpperCamelCase ( self : str ,a__ : str=None ,a__ : Optional[Any]=None) -> None:
"""simple docstring"""
_lowerCAmelCase:str = [
p.to(device=a__ ,dtype=a__) if p.is_floating_point() else p.to(device=a__)
for p in self.shadow_params
]
def __UpperCamelCase ( self : Union[str, Any]) -> dict:
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __UpperCamelCase ( self : Dict ,a__ : Iterable[torch.nn.Parameter]) -> None:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = [param.detach().cpu().clone() for param in parameters]
def __UpperCamelCase ( self : int ,a__ : Iterable[torch.nn.Parameter]) -> None:
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''')
for c_param, param in zip(self.temp_stored_params ,a__):
param.data.copy_(c_param.data)
# Better memory-wise.
_lowerCAmelCase:Union[str, Any] = None
def __UpperCamelCase ( self : Optional[int] ,a__ : dict) -> None:
"""simple docstring"""
_lowerCAmelCase:List[Any] = copy.deepcopy(a__)
_lowerCAmelCase:Tuple = state_dict.get('''decay''' ,self.decay)
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''')
_lowerCAmelCase:Optional[Any] = state_dict.get('''min_decay''' ,self.min_decay)
if not isinstance(self.min_decay ,a__):
raise ValueError('''Invalid min_decay''')
_lowerCAmelCase:List[str] = state_dict.get('''optimization_step''' ,self.optimization_step)
if not isinstance(self.optimization_step ,a__):
raise ValueError('''Invalid optimization_step''')
_lowerCAmelCase:List[Any] = state_dict.get('''update_after_step''' ,self.update_after_step)
if not isinstance(self.update_after_step ,a__):
raise ValueError('''Invalid update_after_step''')
_lowerCAmelCase:Optional[Any] = state_dict.get('''use_ema_warmup''' ,self.use_ema_warmup)
if not isinstance(self.use_ema_warmup ,a__):
raise ValueError('''Invalid use_ema_warmup''')
_lowerCAmelCase:Optional[Any] = state_dict.get('''inv_gamma''' ,self.inv_gamma)
if not isinstance(self.inv_gamma ,(float, int)):
raise ValueError('''Invalid inv_gamma''')
_lowerCAmelCase:Dict = state_dict.get('''power''' ,self.power)
if not isinstance(self.power ,(float, int)):
raise ValueError('''Invalid power''')
_lowerCAmelCase:Optional[Any] = state_dict.get('''shadow_params''' ,a__)
if shadow_params is not None:
_lowerCAmelCase:Tuple = shadow_params
if not isinstance(self.shadow_params ,a__):
raise ValueError('''shadow_params must be a list''')
if not all(isinstance(a__ ,torch.Tensor) for p in self.shadow_params):
raise ValueError('''shadow_params must all be Tensors''')
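# A minimal usage sketch for the EMA helper above (hedged: it mirrors diffusers'
# EMAModel, so the upstream name is used; the toy module is illustrative).
# Without warmup the decay ramps as (1 + step) / (10 + step), e.g. ~0.92 at
# step 100, and is capped at the configured `decay`:
#
#   import torch
#   from diffusers.training_utils import EMAModel
#
#   net = torch.nn.Linear(4, 4)
#   ema = EMAModel(net.parameters(), decay=0.9999)
#   for _ in range(100):
#       ...                          # optimizer.step() on `net`
#       ema.step(net.parameters())   # update the shadow parameters
#   ema.copy_to(net.parameters())    # load the averaged weights for eval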
| 227
|
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def UpperCAmelCase ( snake_case : str ):
if "model" in orig_key:
_lowerCAmelCase:str = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
_lowerCAmelCase:List[Any] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
_lowerCAmelCase:Any = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
_lowerCAmelCase:Tuple = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
_lowerCAmelCase:Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1]
_lowerCAmelCase:Optional[int] = orig_key.replace(F'transformer_{layer_num}' , F'encoder.layer.{layer_num}' )
if "mha.attn" in orig_key:
_lowerCAmelCase:Dict = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
_lowerCAmelCase:Union[str, Any] = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
_lowerCAmelCase:int = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
_lowerCAmelCase:Tuple = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
_lowerCAmelCase:str = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
_lowerCAmelCase:List[Any] = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
_lowerCAmelCase:int = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
_lowerCAmelCase:Any = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
_lowerCAmelCase:Optional[Any] = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
_lowerCAmelCase:str = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
_lowerCAmelCase:str = '''yoso.''' + orig_key
return orig_key
def UpperCAmelCase ( snake_case : Dict , snake_case : List[Any] ):
for key in orig_state_dict.copy().keys():
_lowerCAmelCase:Any = orig_state_dict.pop(snake_case )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
_lowerCAmelCase:Optional[Any] = val
_lowerCAmelCase:Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias''']
_lowerCAmelCase:Union[str, Any] = torch.arange(snake_case ).expand((1, -1) ) + 2
return orig_state_dict
def UpperCAmelCase ( snake_case : Any , snake_case : Union[str, Any] , snake_case : List[Any] ):
_lowerCAmelCase:str = torch.load(snake_case , map_location='''cpu''' )['''model_state_dict''']
_lowerCAmelCase:List[str] = YosoConfig.from_json_file(snake_case )
_lowerCAmelCase:Optional[int] = YosoForMaskedLM(snake_case )
_lowerCAmelCase:Tuple = convert_checkpoint_helper(config.max_position_embeddings , snake_case )
print(model.load_state_dict(snake_case ) )
model.eval()
model.save_pretrained(snake_case )
    print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase__ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
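# Example invocation for the conversion script above (hedged: the script file
# name and paths are illustrative placeholders):
#
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path /path/to/yoso_state_dict.bin \
#       --config_file /path/to/yoso_config.json \
#       --pytorch_dump_path /path/to/output_dir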
| 227
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class lowercase_ :
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = BlenderbotConfig
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Dict = 'gelu'
def __init__( self : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Tuple=1_3 ,lowercase__ : Optional[int]=7 ,lowercase__ : List[str]=True ,lowercase__ : Tuple=False ,lowercase__ : Union[str, Any]=9_9 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=2 ,lowercase__ : Dict=4 ,lowercase__ : Any=3_7 ,lowercase__ : Tuple=0.1 ,lowercase__ : Tuple=0.1 ,lowercase__ : List[str]=2_0 ,lowercase__ : List[str]=2 ,lowercase__ : Optional[int]=1 ,lowercase__ : Any=0 ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = eos_token_id
__lowercase = pad_token_id
__lowercase = bos_token_id
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
__lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
__lowercase = tf.concat([input_ids, eos_tensor] ,axis=1 )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
__lowercase = prepare_blenderbot_inputs_dict(lowercase__ ,lowercase__ ,lowercase__ )
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : str ):
__lowercase = TFBlenderbotModel(config=lowercase__ ).get_decoder()
__lowercase = inputs_dict['''input_ids''']
__lowercase = input_ids[:1, :]
__lowercase = inputs_dict['''attention_mask'''][:1, :]
__lowercase = inputs_dict['''head_mask''']
__lowercase = 1
# first forward pass
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,head_mask=lowercase__ ,use_cache=lowercase__ )
__lowercase , __lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        __lowercase = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.int8 )
# append to next input_ids and
__lowercase = tf.concat([input_ids, next_tokens] ,axis=-1 )
__lowercase = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,past_key_values=lowercase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
__lowercase = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
__lowercase = output_from_no_past[:, -3:, random_slice_idx]
__lowercase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__ ,lowercase__ ,rtol=1e-3 )
def _A ( A__ , A__ , A__ , A__=None , A__=None , A__=None , A__=None , A__=None , ):
"""simple docstring"""
if attention_mask is None:
        __lowercase = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
__lowercase = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
__lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE : List[str] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE : int = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : str = False
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = TFBlenderbotModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(model_inputs.input_ids)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 624
|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """Read the dataset, then build and save four-image mosaics with remapped labels."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    """Collect image paths and corner-format bounding boxes from a YOLO label directory."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
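
# Each YOLO label line is "class x_center y_center width height" with values
# normalized to [0, 1]; the loop above converts centers and sizes to corner
# coordinates, e.g. x_center=0.5, width=0.2 -> xmin=0.4, xmax=0.6.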
def update_image_and_anno(
    all_img_list,
    all_annos,
    idxs,
    output_size,
    scale_range,
    filter_scale=0.0,
):
    """Stitch four images into one mosaic canvas and remap their annotations."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
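
# Coordinate-remapping sketch: each tile rescales normalized coordinates into its
# quadrant, e.g. for the top-right tile an x in [0, 1] maps to scale_x + x * (1 - scale_x),
# so all four annotation sets land in the correct regions of the stitched canvas.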
def random_chars(number_char):
    """Return a random string of lowercase letters and digits of the given length."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 624
| 1
|
def bfs(graph, source, sink, parent):
    """Breadth-first search for an augmenting path; record it in `parent`."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if not visited[ind] and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink over a capacity matrix."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
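
# For this capacity matrix (the classic CLRS example) the expected output is 23,
# e.g. via augmenting paths 0->1->3->5 (12), 0->2->4->5 (4), and 0->2->4->3->5 (7).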
| 71
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657
| 0
|
def solution(numerator=3, denominator=7, limit=1_000_000) -> int:
    """Project Euler 71: return the numerator of the fraction immediately to the
    left of numerator/denominator among reduced proper fractions with
    denominators up to `limit`."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
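
# Worked example (small limit): among fractions with denominator <= 8 the one
# immediately left of 3/7 is 2/5, so solution(3, 7, 8) returns 2.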
| 705
|
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Knuth-Morris-Pratt."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Compute the KMP failure (longest proper prefix-suffix) array."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
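
# Illustrative: get_failure_array("abab") == [0, 0, 1, 2]; on a mismatch after
# matching "aba", the search resumes at pattern index failure[2] == 1 instead of 0,
# which is what keeps the overall scan linear in len(text) + len(pattern).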
| 592
| 0
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(
            mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n"
        )
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels should also work on subclasses
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 81
|
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph,
    v,
    visited_forward,
    visited_backward,
    cst_fwd,
    cst_bwd,
    queue,
    parent,
    shortest_distance,
):
    """Relax the edges of v in one search direction and update the meeting distance."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward and cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
            shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source, destination, graph_forward, graph_backward):
    """Bidirectional Dijkstra: run two searches and stop when the frontiers meet."""
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
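
# With the example graphs above, bidirectional_dij("E", "F", graph_fwd, graph_bwd)
# returns 3: the two frontiers meet on E -> G -> F, beating E -> B -> C -> D -> F (cost 4).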
| 226
| 0
|
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Graph whose edges carry transition probabilities for a Markov chain."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node proportionally to the outgoing probabilities."""
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
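
# Usage sketch (illustrative transitions, not part of the original module):
#   get_transitions("a", [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 1.0)], 1000)
# returns a Counter of how often each node was visited over the 1000 steps.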
| 711
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _lowerCamelCase :
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
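        # e.g. with the defaults above (image_size=30, patch_size=2):
        # (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 2 = 227.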
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase__ : Tuple = TFDeiTModel(config=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : str = TFDeiTForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase__ : Optional[int] = 1
UpperCamelCase__ : Any = TFDeiTForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ : Any = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.type_sequence_label_size
UpperCamelCase__ : Optional[Any] = TFDeiTForImageClassification(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ : int = 1
UpperCamelCase__ : int = TFDeiTForImageClassification(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = TFDeiTModelTester(self )
UpperCamelCase__ : str = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : str = [*signature.parameters.keys()]
UpperCamelCase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Any = TFDeiTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( ):
UpperCamelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
UpperCamelCase__ : str = self.default_image_processor
UpperCamelCase__ : List[str] = prepare_img()
UpperCamelCase__ : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
# forward pass
UpperCamelCase__ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
UpperCamelCase__ : Union[str, Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 462
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
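
# Note: at import time only the names in _import_structure are registered; _LazyModule
# defers the heavy submodule imports until an attribute such as LlamaConfig is first
# accessed, which keeps `import transformers` fast.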
| 331
|
def solution(numerator=3, denominator=7, limit=1_000_000):
    """Project Euler 71: return the numerator of the fraction immediately to the
    left of numerator/denominator among reduced proper fractions with
    denominators up to `limit`."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
| 331
| 1
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
UpperCamelCase = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
UpperCamelCase = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
UpperCamelCase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
UpperCamelCase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
UpperCamelCase = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
UpperCamelCase = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
UpperCamelCase = (
'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
UpperCamelCase = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
UpperCamelCase = (
'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
UpperCamelCase = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
UpperCamelCase = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
UpperCamelCase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
UpperCamelCase = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
UpperCamelCase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
UpperCamelCase = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
UpperCamelCase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
UpperCamelCase = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
UpperCamelCase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
UpperCamelCase = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
UpperCamelCase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
UpperCamelCase = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
UpperCamelCase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
UpperCamelCase = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
UpperCamelCase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
UpperCamelCase = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
UpperCamelCase = ''
UpperCamelCase = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
UpperCamelCase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
UpperCamelCase = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _A ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
assert ReadMe.from_string(lowerCAmelCase_ , lowerCAmelCase_ ).to_dict() == expected_dict
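
# ReadMe.from_string parses the markdown into the nested structure used above: a tree
# of {"name", "text", "is_empty_text", "subsections"} dicts, which is what to_dict()
# returns for comparison against the expected fixtures.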
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _A ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase_ , match=re.escape(expected_error.format(path="root" ) ) ):
lowerCAmelCase__ = ReadMe.from_string(lowerCAmelCase_ , lowerCAmelCase_ )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _A ( lowerCAmelCase_ : int , lowerCAmelCase_ : Any ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase_ , match=re.escape(expected_error.format(path="root" ) ) ):
ReadMe.from_string(lowerCAmelCase_ , lowerCAmelCase_ )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _A ( lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
ReadMe.from_string(lowerCAmelCase_ , lowerCAmelCase_ , suppress_parsing_errors=lowerCAmelCase_ )
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _A ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = Path(lowerCAmelCase_ ) / "README.md"
with open(lowerCAmelCase_ , "w+" ) as readme_file:
readme_file.write(lowerCAmelCase_ )
lowerCAmelCase__ = ReadMe.from_readme(lowerCAmelCase_ , lowerCAmelCase_ ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _A ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = Path(lowerCAmelCase_ ) / "README.md"
with open(lowerCAmelCase_ , "w+" ) as readme_file:
readme_file.write(lowerCAmelCase_ )
lowerCAmelCase__ = expected_error.format(path=lowerCAmelCase_ )
with pytest.raises(lowerCAmelCase_ , match=re.escape(lowerCAmelCase_ ) ):
lowerCAmelCase__ = ReadMe.from_readme(lowerCAmelCase_ , lowerCAmelCase_ )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _A ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = Path(lowerCAmelCase_ ) / "README.md"
with open(lowerCAmelCase_ , "w+" ) as readme_file:
readme_file.write(lowerCAmelCase_ )
lowerCAmelCase__ = expected_error.format(path=lowerCAmelCase_ )
with pytest.raises(lowerCAmelCase_ , match=re.escape(lowerCAmelCase_ ) ):
ReadMe.from_readme(lowerCAmelCase_ , lowerCAmelCase_ )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _A ( lowerCAmelCase_ : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = Path(lowerCAmelCase_ ) / "README.md"
with open(lowerCAmelCase_ , "w+" ) as readme_file:
readme_file.write(lowerCAmelCase_ )
ReadMe.from_readme(lowerCAmelCase_ , lowerCAmelCase_ , suppress_parsing_errors=lowerCAmelCase_ )
| 125
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model from the JSON config
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 125
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class UpperCAmelCase :
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
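        # e.g. a sampled box (x0=7, y0=9, x1=3, y1=2) becomes (3, 2, 7, 9) after the
        # swaps above, guaranteeing x0 <= x1 and y0 <= y1 for every box.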
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
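# A minimal usage sketch (an addition, not part of the test suite): running the batch
# built by `prepare_layoutlm_batch_inputs` through the checkpoint used above. The
# function name is hypothetical; it assumes TF and Hub access are available.
def _layoutlm_forward_example():
    model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
    input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
    outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
    # batch of 2 sequences of 25 tokens, hidden size 768 for the base model
    print(outputs.last_hidden_state.shape)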
| 467
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two terms that differ in at most one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge terms; terms that never merge are the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # a successful merge means neither operand is prime in this round
                # (the comparison direction is corrected here; the merged term is kept)
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """True if the two terms differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one prime implicant marks that implicant as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover the remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
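# Added non-interactive illustration (assumes the corrected merge step above and
# integer minterms, which yield plain binary strings): f(a, b) with minterms
# 0, 1, 3 (00, 01, 11) reduces to the essential prime implicants 0_ and _1.
def _worked_example() -> None:
    binary = decimal_to_binary(2, [0, 1, 3])  # ['00', '01', '11']
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    assert sorted(selection(chart, prime_implicants)) == ["0_", "_1"]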
| 467
| 1
|
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    # Resolve a force given in polar form into its (x, y) components.
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    # The moment of each force about the origin: M = r x F
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
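    # Added check (illustrative, not one of the original textbook problems): two equal
    # and opposite forces acting at the same point produce zero net moment.
    forces = array([polar_force(50.0, 0), polar_force(50.0, 180)])
    location = array([[0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)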
| 73
|
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """Unconditional image generation with a latent diffusion UNet, a VQ-VAE decoder and a DDIM scheduler."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
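# Usage sketch (an addition): "CompVis/ldm-celebahq-256" is the public checkpoint
# documented for this unconditional pipeline; the function name is hypothetical.
def _ldm_example() -> None:
    pipeline = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    image = pipeline(num_inference_steps=50).images[0]
    image.save("ldm_sample.png")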
| 73
| 1
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    # a second lock on the same file must time out while the first one is held
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    # the hashed lock-file name must stay under the 255-character filesystem limit
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock2.acquire():
        with pytest.raises(Timeout):
            lock1.acquire(0)
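# Added sketch (illustrative, not part of the test suite): FileLock objects in
# `datasets.utils.filelock` keep a lock counter, so re-acquiring the *same* object
# nests without deadlocking.
def _filelock_demo(lock_path="demo.lock"):
    lock = FileLock(lock_path)
    with lock.acquire(timeout=1):
        with lock.acquire(timeout=1):  # same object: counter increments, no block
            pass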
| 310
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # scale factors to map resized/padded coordinates back to the raw image
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
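# Runnable usage sketch (an addition): every config value below is an illustrative
# assumption built with SimpleNamespace, not the real frcnn defaults.
def _demo_preprocess() -> None:
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        INPUT=SimpleNamespace(MIN_SIZE_TEST=800, MAX_SIZE_TEST=1333, FORMAT="BGR"),
        MODEL=SimpleNamespace(DEVICE="cpu", PIXEL_STD=[1.0, 1.0, 1.0], PIXEL_MEAN=[102.98, 115.95, 122.77]),
        SIZE_DIVISIBILITY=0,
        PAD_VALUE=0,
    )
    preprocess = Preprocess(cfg)
    img = torch.rand(450, 600, 3)  # HWC float tensor stands in for a decoded image
    images, sizes, scales_yx = preprocess(img, single_image=True)
    print(images.shape, sizes, scales_yx)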
| 310
| 1
|
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    # Set the bit at `position` to 1.
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    # Set the bit at `position` to 0.
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    # Toggle the bit at `position`.
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    # Is the bit at `position` set?
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    # Return 1 if the bit at `position` is set, else 0.
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
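    # A few concrete checks (added illustration), spelled out in binary:
    assert set_bit(0b1101, 1) == 0b1111  # 13 -> 15
    assert clear_bit(0b1111, 1) == 0b1101  # 15 -> 13
    assert flip_bit(0b1101, 1) == 0b1111  # 13 ^ 0b10 -> 15
    assert is_bit_set(0b1010, 3) is True
    assert get_bit(0b1010, 0) == 0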
| 691
|
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # No adjacent vertex may already carry this color.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
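# Illustrative check (an addition): a triangle needs three colors; with only two,
# the backtracking search correctly fails and returns [].
if __name__ == "__main__":
    triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    assert color(triangle, 3) == [0, 1, 2]
    assert color(triangle, 2) == []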
| 691
| 1
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1_000 - i, -1_000 - i, -1)) for i in range(1_000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
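# Added illustration: the binary-search counter walks each row with a shrinking bound.
# On the first test grid above, rows contribute 1 + 1 + 2 + 4 = 8 negatives.
def _example_counts() -> None:
    example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert count_negatives_binary_search(example) == 8
    assert count_negatives_brute_force(example) == 8
    assert count_negatives_brute_force_with_break(example) == 8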
| 6
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
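# Short usage sketch (an addition; assumes Hub access and PIL installed):
# "google/owlvit-base-patch32" is the public OWL-ViT checkpoint.
def _owlvit_processor_example() -> None:
    from PIL import Image
    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.new("RGB", (768, 768))
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
    print(inputs["input_ids"].shape, inputs["pixel_values"].shape)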
| 307
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1_024, 2_048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"""Padding strategy {global_padding} not supported""")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
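# Usage sketch (an illustration, not part of the original module): building a config
# and checking the derived stage names consumed by the backbone utilities.
def _bit_config_example() -> None:
    config = BitConfig(depths=[2, 2], hidden_sizes=[128, 256], out_features=["stage2"])
    print(config.stage_names)   # ['stem', 'stage1', 'stage2']
    print(config.out_features)  # ['stage2']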
| 714
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def UpperCamelCase ( self ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def UpperCamelCase ( self ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def UpperCamelCase ( self ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def UpperCamelCase ( self ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def UpperCamelCase ( self ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def UpperCamelCase ( self ):
pass
@unittest.skip("Safetensors is not supported by timm." )
def UpperCamelCase ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase ( self ):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
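# Standalone sketch of the timm/transformers equivalence exercised above (an addition;
# assumes timm and Hub access are available):
def _auto_backbone_example() -> None:
    backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
    print(backbone.channels)  # per-stage channel counts exposed to downstream models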
| 580
| 0
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and tombstone deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 76
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = (
    r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. '
    r'Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
)
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."""
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[DPRSpanPrediction]:
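        # score every candidate span as start_logit + end_logit and greedily keep
        # the top-scoring spans that do not overlap an already chosen span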
__lowercase : Tuple = []
for start_index, start_score in enumerate(UpperCamelCase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__lowercase : int = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x[1] , reverse=UpperCamelCase_ )
__lowercase : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
__lowercase : Any = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCamelCase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(snake_case )
class UpperCAmelCase_ ( snake_case , snake_case ):
UpperCamelCase =VOCAB_FILES_NAMES
UpperCamelCase =READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase =READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase =["input_ids", "attention_mask"]
| 76
| 1
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCAmelCase :
'''simple docstring'''
_A : int = XGLMConfig
_A : Any = {}
_A : Any = '''gelu'''
def __init__( self : int , __a : Dict , __a : int=14 , __a : List[Any]=7 , __a : List[Any]=True , __a : int=True , __a : Optional[int]=True , __a : Union[str, Any]=99 , __a : int=32 , __a : Optional[Any]=2 , __a : Optional[int]=4 , __a : Optional[Any]=37 , __a : int="gelu" , __a : str=0.1 , __a : str=0.1 , __a : int=512 , __a : Dict=0.02 , ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = parent
__lowercase : Tuple = batch_size
__lowercase : int = seq_length
__lowercase : int = is_training
__lowercase : Any = use_input_mask
__lowercase : Any = use_labels
__lowercase : Optional[Any] = vocab_size
__lowercase : Tuple = d_model
__lowercase : List[str] = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : List[Any] = ffn_dim
__lowercase : Union[str, Any] = activation_function
__lowercase : Optional[Any] = activation_dropout
__lowercase : Optional[int] = attention_dropout
__lowercase : List[str] = max_position_embeddings
__lowercase : List[Any] = initializer_range
__lowercase : Any = None
__lowercase : int = 0
__lowercase : Dict = 2
__lowercase : List[Any] = 1
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase : Union[str, Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__lowercase : int = None
if self.use_input_mask:
__lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Optional[Any] = self.get_config()
__lowercase : int = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__a , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__a , )
def lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : int = self.prepare_config_and_inputs()
        __lowercase ,__lowercase ,__lowercase ,__lowercase : Optional[int] = config_and_inputs
__lowercase : Tuple = {
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_A : Dict = (TFXGLMForCausalLM,) if is_tf_available() else ()
_A : int = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
_A : Optional[Any] = False
_A : Dict = False
_A : Optional[Any] = False
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = TFXGLMModelTester(self )
__lowercase : int = ConfigTester(self , config_class=__a , n_embd=37 )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : int = TFXGLMModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[Any] , __a : Optional[int]=True ) -> int:
"""simple docstring"""
__lowercase : int = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
__lowercase : Tuple = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__lowercase : Any = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
__lowercase : Union[str, Any] = model.generate(__a , do_sample=__a , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __a )
@slow
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
__lowercase : List[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
__lowercase : Optional[Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
__lowercase : str = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
__lowercase : List[str] = model.generate(__a , do_sample=__a , seed=[7, 0] )
__lowercase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=__a )
__lowercase : int = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__a , __a )
@slow
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
__lowercase : Dict = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
__lowercase : Any = """left"""
# use different length sentences to test batching
__lowercase : Any = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
__lowercase : Any = tokenizer(__a , return_tensors="""tf""" , padding=__a )
__lowercase : Union[str, Any] = inputs["""input_ids"""]
__lowercase : Tuple = model.generate(input_ids=__a , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
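        # generate each sentence on its own and check that left padding in the
        # batched call does not change the outputs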
__lowercase : Dict = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
__lowercase : int = model.generate(input_ids=__a , max_new_tokens=12 )
__lowercase : int = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
__lowercase : Tuple = model.generate(input_ids=__a , max_new_tokens=12 )
__lowercase : Union[str, Any] = tokenizer.batch_decode(__a , skip_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a )
__lowercase : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=__a )
__lowercase : Optional[Any] = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
| 649
|
lowerCamelCase : List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
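# Each optional dependency below is probed inside try/except; when a dependency is
# missing, dummy placeholder objects are imported instead so that attribute access
# raises an informative error.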
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 649
| 1
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__UpperCamelCase : int = HfApi()
__UpperCamelCase : str = {}
# fmt: off
__UpperCamelCase : str = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
__UpperCamelCase : Tuple = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
__UpperCamelCase : Tuple = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
__UpperCamelCase : int = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
__UpperCamelCase : Any = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
__UpperCamelCase : Optional[int] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
__UpperCamelCase : int = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
__UpperCamelCase : Optional[Any] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
__UpperCamelCase : Union[str, Any] = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
__UpperCamelCase : List[str] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
__UpperCamelCase : Dict = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
__UpperCamelCase : str = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
__UpperCamelCase : Tuple = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
__UpperCamelCase : str = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
__UpperCamelCase : Optional[int] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
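# Run every matching checkpoint on a fixed noise input and compare the first 30
# logits of the output sample against the reference tensors above.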
__UpperCamelCase : Optional[Any] = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__UpperCamelCase : List[str] = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("""CompVis"""):
__UpperCamelCase : str = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
__UpperCamelCase : str = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__UpperCamelCase : Tuple = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__UpperCamelCase : Any = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__UpperCamelCase : Dict = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 448
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : int = "distilbert"
a : Union[str, Any] = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__(self ,_lowerCamelCase=30522 ,_lowerCamelCase=512 ,_lowerCamelCase=False ,_lowerCamelCase=6 ,_lowerCamelCase=12 ,_lowerCamelCase=768 ,_lowerCamelCase=4 * 768 ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.2 ,_lowerCamelCase=0 ,**_lowerCamelCase ,) -> Tuple:
'''simple docstring'''
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = sinusoidal_pos_embds
__lowercase = n_layers
__lowercase = n_heads
__lowercase = dim
__lowercase = hidden_dim
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation
__lowercase = initializer_range
__lowercase = qa_dropout
__lowercase = seq_classif_dropout
super().__init__(**_lowerCamelCase ,pad_token_id=_lowerCamelCase )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def _UpperCAmelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
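        # dynamic ONNX axes: batch (plus the choice dimension for multiple-choice
        # tasks) and the sequence length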
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 502
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : List[str] = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
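# The import structure maps each submodule to its public names; `_LazyModule` at
# the bottom of the file defers the actual imports until first attribute access.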
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 165
|
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
UpperCAmelCase_ : Tuple = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
UpperCAmelCase_ : List[Any] = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def _lowerCAmelCase(a : Union[str, Any] ) -> Optional[Any]:
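    # MNIST file headers are big-endian; read 4 bytes and decode them as uint32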
_SCREAMING_SNAKE_CASE =numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=a )[0]
@deprecated(a , '''Please use tf.data to implement this functionality.''' )
def _lowerCAmelCase(a : str ) -> Optional[int]:
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=a ) as bytestream:
_SCREAMING_SNAKE_CASE =_readaa(a )
if magic != 2051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
_SCREAMING_SNAKE_CASE =_readaa(a )
_SCREAMING_SNAKE_CASE =_readaa(a )
_SCREAMING_SNAKE_CASE =_readaa(a )
_SCREAMING_SNAKE_CASE =bytestream.read(rows * cols * num_images )
_SCREAMING_SNAKE_CASE =numpy.frombuffer(a , dtype=numpy.uinta )
_SCREAMING_SNAKE_CASE =data.reshape(a , a , a , 1 )
return data
@deprecated(a , '''Please use tf.one_hot on tensors.''' )
def _lowerCAmelCase(a : Tuple , a : Dict ) -> Dict:
_SCREAMING_SNAKE_CASE =labels_dense.shape[0]
_SCREAMING_SNAKE_CASE =numpy.arange(a ) * num_classes
_SCREAMING_SNAKE_CASE =numpy.zeros((num_labels, num_classes) )
_SCREAMING_SNAKE_CASE =1
return labels_one_hot
@deprecated(a , '''Please use tf.data to implement this functionality.''' )
def _lowerCAmelCase(a : Any , a : Any=False , a : Tuple=10 ) -> Optional[int]:
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=a ) as bytestream:
_SCREAMING_SNAKE_CASE =_readaa(a )
if magic != 2049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
_SCREAMING_SNAKE_CASE =_readaa(a )
_SCREAMING_SNAKE_CASE =bytestream.read(a )
_SCREAMING_SNAKE_CASE =numpy.frombuffer(a , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(a , a )
return labels
class __UpperCAmelCase :
'''simple docstring'''
@deprecated(
_A , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self , _A , _A , _A=False , _A=False , _A=dtypes.floataa , _A=True , _A=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =random_seed.get_seed(_A )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
_SCREAMING_SNAKE_CASE =dtypes.as_dtype(_A ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
_SCREAMING_SNAKE_CASE =1_0_0_0_0
_SCREAMING_SNAKE_CASE =one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f"""images.shape: {images.shape} labels.shape: {labels.shape}"""
_SCREAMING_SNAKE_CASE =images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
_SCREAMING_SNAKE_CASE =images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
_SCREAMING_SNAKE_CASE =images.astype(numpy.floataa )
_SCREAMING_SNAKE_CASE =numpy.multiply(_A , 1.0 / 255.0 )
_SCREAMING_SNAKE_CASE =images
_SCREAMING_SNAKE_CASE =labels
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self._images
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self._labels
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self._num_examples
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self._epochs_completed
def UpperCamelCase_ ( self , _A , _A=False , _A=True ):
'''simple docstring'''
if fake_data:
_SCREAMING_SNAKE_CASE =[1] * 7_8_4
_SCREAMING_SNAKE_CASE =[1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_A )],
[fake_label for _ in range(_A )],
)
_SCREAMING_SNAKE_CASE =self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
_SCREAMING_SNAKE_CASE =numpy.arange(self._num_examples )
numpy.random.shuffle(_A )
_SCREAMING_SNAKE_CASE =self.images[perma]
_SCREAMING_SNAKE_CASE =self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
_SCREAMING_SNAKE_CASE =self._num_examples - start
_SCREAMING_SNAKE_CASE =self._images[start : self._num_examples]
_SCREAMING_SNAKE_CASE =self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
_SCREAMING_SNAKE_CASE =numpy.arange(self._num_examples )
numpy.random.shuffle(_A )
_SCREAMING_SNAKE_CASE =self.images[perm]
_SCREAMING_SNAKE_CASE =self.labels[perm]
# Start next epoch
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =batch_size - rest_num_examples
_SCREAMING_SNAKE_CASE =self._index_in_epoch
_SCREAMING_SNAKE_CASE =self._images[start:end]
_SCREAMING_SNAKE_CASE =self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
_SCREAMING_SNAKE_CASE =self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(a , '''Please write your own downloading logic.''' )
def _lowerCAmelCase(a : Any , a : str , a : Optional[int] ) -> Optional[Any]:
if not gfile.Exists(a ):
gfile.MakeDirs(a )
_SCREAMING_SNAKE_CASE =os.path.join(a , a )
if not gfile.Exists(a ):
urllib.request.urlretrieve(a , a ) # noqa: S310
with gfile.GFile(a ) as f:
_SCREAMING_SNAKE_CASE =f.size()
print('''Successfully downloaded''' , a , a , '''bytes.''' )
return filepath
@deprecated(
a , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def _lowerCAmelCase(a : Optional[int] , a : Tuple=False , a : str=False , a : Union[str, Any]=dtypes.floataa , a : Tuple=True , a : Tuple=5000 , a : Union[str, Any]=None , a : List[str]=DEFAULT_SOURCE_URL , ) -> List[Any]:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=a , one_hot=a , dtype=a , seed=a )
_SCREAMING_SNAKE_CASE =fake()
_SCREAMING_SNAKE_CASE =fake()
_SCREAMING_SNAKE_CASE =fake()
return _Datasets(train=a , validation=a , test=a )
if not source_url: # empty string check
_SCREAMING_SNAKE_CASE =DEFAULT_SOURCE_URL
_SCREAMING_SNAKE_CASE ='''train-images-idx3-ubyte.gz'''
_SCREAMING_SNAKE_CASE ='''train-labels-idx1-ubyte.gz'''
_SCREAMING_SNAKE_CASE ='''t10k-images-idx3-ubyte.gz'''
_SCREAMING_SNAKE_CASE ='''t10k-labels-idx1-ubyte.gz'''
_SCREAMING_SNAKE_CASE =_maybe_download(
a , a , source_url + train_images_file )
with gfile.Open(a , '''rb''' ) as f:
_SCREAMING_SNAKE_CASE =_extract_images(a )
_SCREAMING_SNAKE_CASE =_maybe_download(
a , a , source_url + train_labels_file )
with gfile.Open(a , '''rb''' ) as f:
_SCREAMING_SNAKE_CASE =_extract_labels(a , one_hot=a )
_SCREAMING_SNAKE_CASE =_maybe_download(
a , a , source_url + test_images_file )
with gfile.Open(a , '''rb''' ) as f:
_SCREAMING_SNAKE_CASE =_extract_images(a )
_SCREAMING_SNAKE_CASE =_maybe_download(
a , a , source_url + test_labels_file )
with gfile.Open(a , '''rb''' ) as f:
_SCREAMING_SNAKE_CASE =_extract_labels(a , one_hot=a )
if not 0 <= validation_size <= len(a ):
_SCREAMING_SNAKE_CASE =(
'''Validation size should be between 0 and '''
f"""{len(a )}. Received: {validation_size}."""
)
raise ValueError(a )
_SCREAMING_SNAKE_CASE =train_images[:validation_size]
_SCREAMING_SNAKE_CASE =train_labels[:validation_size]
_SCREAMING_SNAKE_CASE =train_images[validation_size:]
_SCREAMING_SNAKE_CASE =train_labels[validation_size:]
_SCREAMING_SNAKE_CASE ={'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
_SCREAMING_SNAKE_CASE =_DataSet(a , a , **a )
_SCREAMING_SNAKE_CASE =_DataSet(a , a , **a )
_SCREAMING_SNAKE_CASE =_DataSet(a , a , **a )
return _Datasets(train=a , validation=a , test=a )
| 165
| 1
|
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
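# Example scripts excluded from the feature-script vs. complete-example comparison.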
__lowerCAmelCase = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : List[str] ,_a : Dict ,_a : Tuple ,_a : Union[str, Any] = None ,_a : List[str] = None ):
'''simple docstring'''
_a : List[str] = None
_a : List[Any] = os.path.abspath(os.path.join('examples' ,'by_feature' ) )
_a : Optional[int] = os.path.abspath('examples' )
for item in os.listdir(a_ ):
if item not in EXCLUDE_EXAMPLES:
_a : List[str] = os.path.join(a_ ,a_ )
if os.path.isfile(a_ ) and ".py" in item_path:
with self.subTest(
tested_script=a_ ,feature_script=a_ ,tested_section='main()' if parser_only else 'training_function()' ,):
_a : Optional[Any] = compare_against_test(
os.path.join(a_ ,a_ ) ,a_ ,a_ ,a_ )
_a : Optional[Any] = "\n".join(a_ )
if special_strings is not None:
for string in special_strings:
_a : Tuple = diff.replace(a_ ,'' )
self.assertEqual(a_ ,'' )
def __lowercase ( self : str ):
'''simple docstring'''
self.one_complete_example('complete_nlp_example.py' ,a_ )
self.one_complete_example('complete_nlp_example.py' ,a_ )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = os.path.abspath(os.path.join('examples' ,'cv_example.py' ) )
_a : str = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example('complete_cv_example.py' ,a_ ,a_ ,a_ )
self.one_complete_example('complete_cv_example.py' ,a_ ,a_ ,a_ )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class UpperCAmelCase__ ( a_ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = False
@classmethod
def __lowercase ( cls : List[str] ):
'''simple docstring'''
super().setUpClass()
_a : List[str] = tempfile.mkdtemp()
_a : Union[str, Any] = os.path.join(cls._tmpdir ,'default_config.yml' )
write_basic_config(save_location=cls.configPath )
_a : Optional[int] = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def __lowercase ( cls : List[str] ):
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[int] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,'epoch_0' ) ) )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
_a : Any = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,'step_2' ) ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Dict = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir ,'epoch_0' )}
""".split()
_a : str = run_command(self._launch_args + testargs ,return_stdout=a_ )
self.assertNotIn('epoch 0:' ,a_ )
self.assertIn('epoch 1:' ,a_ )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : int = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir ,'step_2' )}
""".split()
_a : Tuple = run_command(self._launch_args + testargs ,return_stdout=a_ )
if torch.cuda.is_available():
_a : Tuple = torch.cuda.device_count()
else:
_a : Optional[int] = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' ,a_ )
self.assertIn('epoch 1:' ,a_ )
else:
self.assertIn('epoch 0:' ,a_ )
self.assertIn('epoch 1:' ,a_ )
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ ,{'TESTING_MOCKED_DATALOADERS': '0'} ):
_a : List[Any] = run_command(self._launch_args + testargs ,return_stdout=a_ )
_a : Tuple = re.findall('({.+})' ,a_ )
_a : int = [r for r in results if "accuracy" in r][-1]
_a : Union[str, Any] = ast.literal_eval(a_ )
self.assertGreaterEqual(results['accuracy'] ,0.75 )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Dict = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ ,{'WANDB_MODE': 'offline'} )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
_a : Union[str, Any] = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(a_ ,'tracking' ) ) )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[str] = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
| 229
|
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( a_ ):
__lowerCAmelCase = ["input_values", "attention_mask"]
def __init__( self , a_ = 1 , a_ = 1_6_0_0_0 , a_ = 0.0 , a_ = False , a_ = 8_0 , a_ = 1_6 , a_ = 6_4 , a_ = "hann_window" , a_ = 1.0 , a_ = 8_0 , a_ = 7_6_0_0 , a_ = 1e-10 , a_ = 2 , a_ = True , **a_ , ):
super().__init__(feature_size=a_ , sampling_rate=a_ , padding_value=a_ , **a_ )
a_ : Optional[Any] = do_normalize
a_ : Any = return_attention_mask
a_ : int = num_mel_bins
a_ : int = hop_length
a_ : List[str] = win_length
a_ : Dict = win_function
a_ : Optional[Any] = frame_signal_scale
a_ : List[str] = fmin
a_ : Any = fmax
a_ : str = mel_floor
a_ : int = reduction_factor
a_ : Tuple = win_length * sampling_rate // 1_0_0_0
a_ : int = hop_length * sampling_rate // 1_0_0_0
a_ : Dict = optimal_fft_length(self.sample_size )
a_ : int = (self.n_fft // 2) + 1
a_ : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=a_ )
a_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
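        # the analysis window and mel filter bank are precomputed once here and
        # reused for every feature-extraction call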
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , a_ , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , a_ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def snake_case_ ( a_ , a_ , a_ = 0.0 ):
if attention_mask is not None:
a_ : int = np.array(a_ , np.intaa )
a_ : Tuple = []
for vector, length in zip(a_ , attention_mask.sum(-1 ) ):
a_ : List[str] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
a_ : Tuple = padding_value
normed_input_values.append(a_ )
else:
a_ : Any = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def snake_case_ ( self , a_ , ):
a_ : Optional[Any] = spectrogram(
a_ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
return log_mel_spec.T
def __call__( self , a_ = None , a_ = None , a_ = False , a_ = None , a_ = False , a_ = None , a_ = None , a_ = None , a_ = None , **a_ , ):
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if audio is not None:
a_ : int = self._process_audio(
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , **a_ , )
else:
a_ : Optional[Any] = None
if audio_target is not None:
a_ : Optional[Any] = self._process_audio(
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , **a_ , )
if inputs is None:
return inputs_target
else:
a_ : Dict = inputs_target["input_values"]
a_ : int = inputs_target.get("attention_mask" )
if decoder_attention_mask is not None:
a_ : List[Any] = decoder_attention_mask
return inputs
def snake_case_ ( self , a_ , a_ = False , a_ = False , a_ = None , a_ = False , a_ = None , a_ = None , a_ = None , **a_ , ):
a_ : List[str] = isinstance(a_ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
a_ : Optional[int] = is_batched_numpy or (
isinstance(a_ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a_ : str = [np.asarray(a_ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(a_ , np.ndarray ):
a_ : Dict = np.asarray(a_ , dtype=np.floataa )
elif isinstance(a_ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
a_ : Tuple = speech.astype(np.floataa )
# always return batch
if not is_batched:
a_ : Union[str, Any] = [speech]
# needed to make pad() work on spectrogram inputs
a_ : List[str] = self.feature_size
# convert into correct format for padding
if is_target:
a_ : Dict = [self._extract_mel_features(a_ ) for waveform in speech]
a_ : List[Any] = BatchFeature({"input_values": features} )
a_ : str = self.num_mel_bins
else:
a_ : List[str] = BatchFeature({"input_values": speech} )
a_ : Any = self.pad(
a_ , padding=a_ , max_length=a_ , truncation=a_ , pad_to_multiple_of=a_ , return_attention_mask=a_ , **a_ , )
a_ : Tuple = feature_size_hack
# convert input values to correct format
a_ : Union[str, Any] = padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
a_ : str = [np.asarray(a_ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(a_ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
a_ : Dict = [array.astype(np.floataa ) for array in input_values]
elif isinstance(a_ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
a_ : int = input_values.astype(np.floataa )
# convert attention_mask to correct format
a_ : Union[str, Any] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
a_ : Union[str, Any] = [np.asarray(a_ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
a_ : int = (
attention_mask
if self._get_padding_strategies(a_ , max_length=a_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
a_ : Dict = self.zero_mean_unit_var_norm(
padded_inputs["input_values"] , attention_mask=a_ , padding_value=self.padding_value )
if return_tensors is not None:
a_ : Optional[Any] = padded_inputs.convert_to_tensors(a_ )
return padded_inputs
def snake_case_ ( self ):
a_ : int = super().to_dict()
# Don't serialize these as they are derived from the other properties.
a_ : List[str] = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
| 237
| 0
|
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """simple docstring"""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 74
|
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
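# Project Euler 58: grow the number spiral until the share of primes along the
# diagonals drops below `ratio`.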
def solution(ratio: float = 0.1) -> int:
    """simple docstring"""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74
| 1
|
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
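# Project Euler 187: count composites below `max_number` that are the product of
# exactly two primes, using a two-pointer sweep over the sorted primes.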
def solution(max_number: int = 10**8) -> int:
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F"{solution() = }")
| 452
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def a_ ( UpperCamelCase_ ):
A_ = botoa.client("iam" )
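    # trust policy that allows the SageMaker service to assume the execution role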
A_ = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=UpperCamelCase_ , AssumeRolePolicyDocument=json.dumps(UpperCamelCase_ , indent=2 ) )
A_ = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=UpperCamelCase_ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(UpperCamelCase_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def a_ ( UpperCamelCase_ ):
A_ = botoa.client("iam" )
return iam_client.get_role(RoleName=UpperCamelCase_ )["Role"]["Arn"]
def a_ ( ):
A_ = _ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , UpperCamelCase_ , )
A_ = None
if credentials_configuration == 0:
A_ = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
A_ = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
A_ = _ask_field("AWS Access Key ID: " )
A_ = aws_access_key_id
A_ = _ask_field("AWS Secret Access Key: " )
A_ = aws_secret_access_key
A_ = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
A_ = aws_region
A_ = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , UpperCamelCase_ , )
if role_management == 0:
A_ = _ask_field("Enter your IAM role name: " )
else:
A_ = "accelerate_sagemaker_execution_role"
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(UpperCamelCase_ )
A_ = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , )
A_ = None
if is_custom_docker_image:
A_ = _ask_field("Enter your Docker image: " , lambda UpperCamelCase_ : str(UpperCamelCase_ ).lower() )
A_ = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , )
A_ = None
if is_sagemaker_inputs_enabled:
A_ = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda UpperCamelCase_ : str(UpperCamelCase_ ).lower() , )
A_ = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , )
A_ = None
if is_sagemaker_metrics_enabled:
A_ = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda UpperCamelCase_ : str(UpperCamelCase_ ).lower() , )
A_ = _ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
A_ = {}
A_ = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , )
if use_dynamo:
A_ = "dynamo_"
A_ = _ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
A_ = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , )
if use_custom_options:
A_ = _ask_options(
"Which mode do you want to use?" , UpperCamelCase_ , lambda UpperCamelCase_ : TORCH_DYNAMO_MODES[int(UpperCamelCase_ )] , default="default" , )
A_ = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , )
A_ = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , )
A_ = "Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
A_ = _ask_options(
UpperCamelCase_ , UpperCamelCase_ , lambda UpperCamelCase_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(UpperCamelCase_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
A_ = _ask_field(UpperCamelCase_ , lambda UpperCamelCase_ : str(UpperCamelCase_ ).lower() , default="ml.p3.2xlarge" )
A_ = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
A_ = _ask_field(
"How many machines do you want use? [1]: " , UpperCamelCase_ , default=1 , )
A_ = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=UpperCamelCase_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=UpperCamelCase_ , use_cpu=UpperCamelCase_ , dynamo_config=UpperCamelCase_ , eca_instance_type=UpperCamelCase_ , profile=UpperCamelCase_ , region=UpperCamelCase_ , iam_role_name=UpperCamelCase_ , mixed_precision=UpperCamelCase_ , num_machines=UpperCamelCase_ , sagemaker_inputs_file=UpperCamelCase_ , sagemaker_metrics_file=UpperCamelCase_ , )
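

# A minimal, non-interactive sketch (added for illustration) of the object the
# function above builds. Field names mirror the return statement; the concrete
# values here are assumptions, not accelerate defaults.
def _example_sagemaker_config():
    return SageMakerConfig(
        image_uri=None,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=SageMakerDistributedType.NO,
        use_cpu=False,
        dynamo_config={},
        ec2_instance_type="ml.p3.2xlarge",
        profile="default",
        region="us-east-1",
        iam_role_name="accelerate_sagemaker_execution_role",
        mixed_precision="no",
        num_machines=1,
        sagemaker_inputs_file=None,
        sagemaker_metrics_file=None,
    )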
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # quiet TensorFlow logging (the likely target of this assignment, given the otherwise unused `os` import)
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModel.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModel.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelForPreTraining.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelForPreTraining.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(__lowercase , from_pt=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(__lowercase , from_tf=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelWithLMHead.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(__lowercase , from_pt=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelForMaskedLM.from_pretrained(__lowercase , from_tf=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ = AutoModelForMaskedLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(__lowercase , from_pt=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(__lowercase , from_tf=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelForSequenceClassification.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelForQuestionAnswering.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__lowercase ) , 1_44_10 )
UpperCAmelCase_ = AutoModelWithLMHead.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__lowercase ) , 1_44_10 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__lowercase ) , 1_44_10 )
UpperCAmelCase_ = AutoModelWithLMHead.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__lowercase ) , 1_44_10 )
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass bubbles the largest remaining element
    to the end, so the next call only needs to scan one fewer position."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
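
if __name__ == "__main__":
    # Illustrative check (added): each recursive call scans one fewer position,
    # since the previous pass left the largest element in place.
    assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]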
from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Sum the factorials of the digits of n, using the precomputed table."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Sum all numbers that equal the sum of the factorials of their digits.
    7 * 9! + 1 is an upper bound, since even an 8-digit number can contribute
    at most 8 * 9!, which is below 10**7."""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a number whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
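
if __name__ == "__main__":
    # Illustrative check (added): the first ten ugly numbers are
    # 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th is 12.
    assert ugly_numbers(10) == 12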
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class _snake_case ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : List[Any] = GPTSwaTokenizer
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : List[Any] = True
_UpperCamelCase : List[str] = False
def __A ( self : Any ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ : Union[str, Any] =GPTSwaTokenizer(UpperCamelCase_ , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self : List[str] , UpperCamelCase_ : str ):
lowerCAmelCase_ : Optional[int] ='''This is a test'''
lowerCAmelCase_ : int ='''This is a test'''
return input_text, output_text
def __A ( self : List[str] ):
lowerCAmelCase_ : List[str] ='''<s>'''
lowerCAmelCase_ : Optional[int] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def __A ( self : Optional[Any] ):
lowerCAmelCase_ : Dict =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(UpperCamelCase_ ) , 2000 )
def __A ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def __A ( self : Union[str, Any] ):
lowerCAmelCase_ : int =GPTSwaTokenizer(UpperCamelCase_ )
lowerCAmelCase_ : Any =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [465, 287, 265, 631, 842] )
lowerCAmelCase_ : List[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
# fmt: off
self.assertListEqual(
UpperCamelCase_ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
lowerCAmelCase_ : Dict =tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowerCAmelCase_ : int =tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
# fmt: off
self.assertListEqual(
UpperCamelCase_ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
# fmt: on
def __A ( self : str ):
lowerCAmelCase_ : List[Any] =GPTSwaTokenizer(UpperCamelCase_ )
lowerCAmelCase_ : str =['''This is a test''', '''I was born in 92000, and this is falsé.''']
lowerCAmelCase_ : List[Any] =[
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertListEqual(tokenizer.encode_fast(UpperCamelCase_ ) , UpperCamelCase_ )
# Test that decode_fast returns the input text
for text, token_ids in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.decode_fast(UpperCamelCase_ ) , UpperCamelCase_ )
@slow
def __A ( self : List[Any] ):
lowerCAmelCase_ : Union[str, Any] =[
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
lowerCAmelCase_ : List[str] ={'''input_ids''': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=UpperCamelCase_ , )
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
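

# Minimal usage sketch (added; assumes the classes above import cleanly inside
# transformers). Instantiating with no arguments reproduces the BEiT-base
# defaults in the signature; any field can be overridden by keyword.
if __name__ == "__main__":
    config = BeitConfig(image_size=384, drop_path_rate=0.2)
    print(config.image_size, config.num_hidden_layers)  # 384 12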
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        # move the randomly chosen pivot to the end of the slice
        a[pivot], a[end] = a[end], a[pivot]

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    a[pivot], a[end] = a[end], a[pivot]
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index += 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]

    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)


outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __A :
'''simple docstring'''
def __init__( self : List[Any] ,_snake_case : Any ,_snake_case : Any=13 ,_snake_case : Dict=7 ,_snake_case : Any=6 ,_snake_case : List[str]=17 ,_snake_case : Any=23 ,_snake_case : int=11 ,_snake_case : Union[str, Any]=True ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = parent
lowercase__ : List[str] = batch_size
lowercase__ : Union[str, Any] = seq_length
lowercase__ : Any = act_dim
lowercase__ : int = state_dim
lowercase__ : str = hidden_size
lowercase__ : Optional[Any] = max_length
lowercase__ : Dict = is_training
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase__ : int = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase__ : Any = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase__ : str = ids_tensor((self.batch_size, self.seq_length) ,vocab_size=1_000 )
lowercase__ : Union[str, Any] = random_attention_mask((self.batch_size, self.seq_length) )
lowercase__ : List[str] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size ,seq_length=self.seq_length ,act_dim=self.act_dim ,state_dim=self.state_dim ,hidden_size=self.hidden_size ,max_length=self.max_length ,)
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[Any] ,_snake_case : List[str] ,_snake_case : int ,_snake_case : Union[str, Any] ,_snake_case : Dict ,_snake_case : int ,_snake_case : Any ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : int = DecisionTransformerModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[str] = model(_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case )
self.parent.assertEqual(result.state_preds.shape ,states.shape )
self.parent.assertEqual(result.action_preds.shape ,actions.shape )
self.parent.assertEqual(result.return_preds.shape ,returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase : Tuple = ()
lowerCAmelCase : Dict = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowerCAmelCase : Any = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Tuple = False
lowerCAmelCase : Tuple = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
def UpperCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
lowercase__ : int = DecisionTransformerModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Dict = DecisionTransformerModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(_snake_case )
lowercase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(_snake_case )] ,_snake_case )
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform
lowercase__ : Tuple = 10 # defined by the RL environment, may be normalized
lowercase__ : Optional[Any] = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
lowercase__ : int = model.to(_snake_case )
lowercase__ : Dict = model.config
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = torch.randn(1 ,1 ,config.state_dim ).to(device=_snake_case ,dtype=torch.floataa ) # env.reset()
lowercase__ : Optional[Any] = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] ,device=_snake_case )
lowercase__ : Tuple = torch.tensor(_snake_case ,device=_snake_case ,dtype=torch.floataa ).reshape(1 ,1 ,1 )
lowercase__ : str = state
lowercase__ : Any = torch.zeros(1 ,0 ,config.act_dim ,device=_snake_case ,dtype=torch.floataa )
lowercase__ : int = torch.zeros(1 ,0 ,device=_snake_case ,dtype=torch.floataa )
lowercase__ : Union[str, Any] = torch.tensor(0 ,device=_snake_case ,dtype=torch.long ).reshape(1 ,1 )
for step in range(_snake_case ):
lowercase__ : List[str] = torch.cat([actions, torch.zeros(1 ,1 ,config.act_dim ,device=_snake_case )] ,dim=1 )
lowercase__ : List[Any] = torch.cat([rewards, torch.zeros(1 ,1 ,device=_snake_case )] ,dim=1 )
lowercase__ : int = torch.ones(1 ,states.shape[1] ).to(dtype=torch.long ,device=states.device )
with torch.no_grad():
lowercase__ : int = model(
states=_snake_case ,actions=_snake_case ,rewards=_snake_case ,returns_to_go=_snake_case ,timesteps=_snake_case ,attention_mask=_snake_case ,return_dict=_snake_case ,)
self.assertEqual(action_pred.shape ,actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] ,expected_outputs[step] ,atol=1e-4 ) )
lowercase__ : Optional[Any] = ( # env.step(action)
torch.randn(1 ,1 ,config.state_dim ).to(device=_snake_case ,dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase__ : int = action_pred[0, -1]
lowercase__ : Optional[Any] = torch.cat([states, state] ,dim=1 )
lowercase__ : Optional[Any] = returns_to_go[0, -1] - reward
lowercase__ : Dict = torch.cat([returns_to_go, pred_return.reshape(1 ,1 ,1 )] ,dim=1 )
lowercase__ : Union[str, Any] = torch.cat(
[timesteps, torch.ones((1, 1) ,device=_snake_case ,dtype=torch.long ) * (step + 1)] ,dim=1 )
"""simple docstring"""
import re
def __UpperCAmelCase ( __lowerCamelCase ) -> bool:
lowercase__ : Optional[Any] = re.compile(
r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
return bool(re.search(__lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
lowerCAmelCase_ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
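
if __name__ == "__main__":
    # Illustrative cases (added): the prefix may be 0, 94, +94 or 0094, and
    # the subscriber part must start with 7 plus an allowed second digit.
    assert is_sri_lankan_phone_number("+94773283048") is True
    assert is_sri_lankan_phone_number("0957651234") is False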
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
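

# Usage sketch (added, illustrative): `num_layers` is derived from the switch
# and extra layer counts rather than passed directly, matching the
# `num_hidden_layers` entry in the attribute map above.
if __name__ == "__main__":
    config = GPTSanJapaneseConfig(num_switch_layers=4, num_ext_layers=2)
    print(config.num_layers)  # 6 == num_switch_layers + num_ext_layers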
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input: fold the frame axis out of the batch and move it to the
        # sequence axis so attention runs across time.
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: restore the (batch * frames, channels, h, w) layout and
        # add the residual connection.
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
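

# Shape sketch (added, illustrative): frames are folded out of the batch and
# moved to the sequence axis for attention, then restored, so the output shape
# matches the input shape exactly.
if __name__ == "__main__":
    model = TransformerTemporalModel(in_channels=32, num_attention_heads=4, attention_head_dim=8)
    sample = torch.randn(2 * 4, 32, 8, 8)  # (batch * frames, channels, height, width)
    out = model(sample, num_frames=4).sample
    print(out.shape)  # torch.Size([8, 32, 8, 8])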
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def a ( __UpperCAmelCase : int = 8 ) -> str:
__magic_name__: Union[str, Any] = ascii_letters + digits + punctuation
return "".join(secrets.choice(__UpperCAmelCase ) for _ in range(__UpperCAmelCase ) )
def a ( __UpperCAmelCase : str , __UpperCAmelCase : int ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(__UpperCAmelCase )
__magic_name__: Tuple = i // 3
__magic_name__: Any = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
__magic_name__: Union[str, Any] = (
chars_incl
+ random(__UpperCAmelCase , quotient + remainder )
+ random(__UpperCAmelCase , __UpperCAmelCase )
+ random(__UpperCAmelCase , __UpperCAmelCase )
)
__magic_name__: Optional[int] = list(__UpperCAmelCase )
shuffle(__UpperCAmelCase )
return "".join(__UpperCAmelCase )
# random is a generalised function for letters, characters and numbers
def a ( __UpperCAmelCase : str , __UpperCAmelCase : int ) -> str:
return "".join(secrets.choice(__UpperCAmelCase ) for _ in range(__UpperCAmelCase ) )
def a ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] ) -> List[Any]:
pass # Put your code here...
def a ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ) -> Optional[Any]:
pass # Put your code here...
def a ( __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] ) -> int:
pass # Put your code here...
def a ( __UpperCAmelCase : str , __UpperCAmelCase : int = 8 ) -> bool:
if len(__UpperCAmelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
__magic_name__: List[str] = any(char in ascii_uppercase for char in password )
__magic_name__: Optional[Any] = any(char in ascii_lowercase for char in password )
__magic_name__: Tuple = any(char in digits for char in password )
__magic_name__: Any = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def a ( ) -> str:
__magic_name__: str = int(input("""Please indicate the max length of your password: """ ).strip() )
__magic_name__: Optional[int] = input(
"""Please indicate the characters that must be in your password: """ ).strip()
print("""Password generated:""" , password_generator(__UpperCAmelCase ) )
print(
"""Alternative Password generated:""" , alternative_password_generator(__UpperCAmelCase , __UpperCAmelCase ) , )
print("""[If you are thinking of using this passsword, You better save it.]""" )
if __name__ == "__main__":
main()
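
if __name__ == "__main__":
    from math import log2

    # Rough strength estimate (added, illustrative arithmetic): the pool holds
    # 26 + 26 + 10 + 32 = 94 printable symbols, so an 8-character password
    # drawn uniformly carries about 8 * log2(94) ~ 52 bits of entropy.
    print(f"~{8 * log2(94):.1f} bits of entropy for an 8-character password")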
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
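
# Example invocation (added; the script name and paths are placeholders):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bert_model.ckpt \
#       --bert_config_file bert_config.json \
#       --pytorch_dump_path pytorch_model.bin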
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for an input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the magnitude response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
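

# Minimal usage sketch (added): any object with a `process(sample) -> float`
# method satisfies the FilterType protocol above. An identity "filter" (a
# stand-in, not a real filter design) yields a flat 0 dB magnitude response.
if __name__ == "__main__":
    class IdentityFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(IdentityFilter(), 48000)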
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    """Return the zero-based index of the rightmost set bit of `number`."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
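
if __name__ == "__main__":
    # Worked example (added): 36 == 0b100100; `number & -number` isolates the
    # lowest set bit (0b100 == 4), and log2(4) == 2 gives its index.
    assert get_index_of_rightmost_set_bit(36) == 2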
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
UpperCAmelCase = parser.parse_args()
if args.model_type == "roberta":
UpperCAmelCase = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCAmelCase = 'roberta'
elif args.model_type == "gpt2":
UpperCAmelCase = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCAmelCase = 'transformer'
UpperCAmelCase = model.state_dict()
UpperCAmelCase = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCAmelCase = state_dict[F'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCAmelCase = F'''{prefix}.embeddings.{w}.weight'''
UpperCAmelCase = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCAmelCase = F'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCAmelCase = state_dict[param_name]
# Transformer Blocks #
UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCAmelCase = state_dict[
F'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCAmelCase = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCAmelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCAmelCase = state_dict[F'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase = state_dict[F'''lm_head.dense.{w}''']
UpperCAmelCase = state_dict[F'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCAmelCase = state_dict[F'''{prefix}.ln_f.{w}''']
UpperCAmelCase = state_dict['lm_head.weight']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
import math


def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """Columnar transposition: the i-th column collects every key-th
    character of the message, starting at offset i."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Invert the transposition by filling a grid row by row and skipping
    the shaded (unused) boxes at the end."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
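
if __name__ == "__main__":
    # Round-trip sanity check (added, illustrative): with key 4 the columns of
    # "Hello World!" are read off every 4th character.
    cipher = encrypt_message(4, "Hello World!")
    assert cipher == "Hore llWdlo!"
    assert decrypt_message(4, cipher) == "Hello World!"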
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__magic_name__ = datasets.load_iris()
__magic_name__ = np.array(data["data"])
__magic_name__ = np.array(data["target"])
__magic_name__ = data["target_names"]
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = train_test_split(X, y)
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
return np.linalg.norm(np.array(UpperCamelCase_ ) - np.array(UpperCamelCase_ ) )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=5 ):
__SCREAMING_SNAKE_CASE = zip(UpperCamelCase_ , UpperCamelCase_ )
# List of distances of all points from the point to be classified
__SCREAMING_SNAKE_CASE = []
for data_point in data:
__SCREAMING_SNAKE_CASE = euclidean_distance(data_point[0] , UpperCamelCase_ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__SCREAMING_SNAKE_CASE = [i[1] for i in sorted(UpperCamelCase_ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__SCREAMING_SNAKE_CASE = Counter(UpperCamelCase_ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
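

# Interleaving sketch (added, illustrative): with the defaults of 12 layers
# and 3 sparse encoder layers, every 4th encoder block is a sparse MoE block.
if __name__ == "__main__":
    config = SwitchTransformersConfig()
    print(config.encoder_sparse_step)  # 4 == num_layers // num_sparse_encoder_layers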
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the neuron's scaled output approaches `expected`."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
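
# Behavior sketch (added, illustrative and stochastic): with enough updates the
# single weight converges so that sigmoid(INITIAL_VALUE * weight) * 100
# approaches `expected`, e.g. forward_propagation(32, 450_000) lands near 32.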
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _a ( *_lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase=True , _lowerCamelCase=2 ) -> Union[str, Any]:
"""simple docstring"""
from .. import __version__
__snake_case : Optional[Any] = take_from
__snake_case : Union[str, Any] = ()
if not isinstance(args[0] , _lowerCamelCase ):
__snake_case : Union[str, Any] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
__snake_case : Dict = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
__snake_case : List[Any] = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
__snake_case : str = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__snake_case : str = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__snake_case : List[str] = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
__snake_case : Optional[Any] = inspect.getouterframes(inspect.currentframe() )[1]
__snake_case : int = call_frame.filename
__snake_case : Union[str, Any] = call_frame.lineno
__snake_case : Union[str, Any] = call_frame.function
__snake_case , __snake_case : str = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(_lowerCamelCase ) == 0:
return
elif len(_lowerCamelCase ) == 1:
return values[0]
return values
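# Minimal usage sketch (hypothetical call site; the helper above is exposed as
# `deprecate` in diffusers):
#   def run(**kwargs):
#       scale = deprecate("scale", "0.30.0", "Use `guidance_scale` instead.", take_from=kwargs)
# If the caller passed `scale=...`, it is popped from `kwargs`, a warning is
# emitted and the popped value is returned; when there is nothing to collect
# the helper returns None.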
| 26
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=4 , ):
__a : Any = parent
__a : Optional[int] = batch_size
__a : str = seq_length
__a : List[str] = is_training
__a : Optional[Any] = use_attention_mask
__a : Optional[Any] = use_token_type_ids
__a : List[str] = use_labels
__a : Union[str, Any] = vocab_size
__a : int = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : Dict = intermediate_size
__a : List[str] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : int = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Optional[int] = type_sequence_label_size
__a : Optional[Any] = initializer_range
__a : Optional[int] = num_choices
def _lowerCamelCase ( self ):
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Union[str, Any] = None
if self.use_attention_mask:
__a : Any = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Any = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self ):
__a : Dict = self.prepare_config_and_inputs()
__a , __a , __a , __a : str = config_and_inputs
__a : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self ):
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a , __a : Union[str, Any] = config_and_inputs
__a : Optional[int] = True
__a : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self ):
__a : Dict = FlaxRobertaModelTester(self )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__a : int = model_class_name.from_pretrained('''roberta-base''' , from_pt=_UpperCAmelCase )
__a : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
| 52
| 0
|
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , lowerCAmelCase : Any , lowerCAmelCase : Tuple=13 , lowerCAmelCase : List[Any]=30 , lowerCAmelCase : Dict=2 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : str=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Union[str, Any]=32 , lowerCAmelCase : Any=5 , lowerCAmelCase : Any=4 , lowerCAmelCase : Optional[Any]=37 , lowerCAmelCase : List[str]="gelu" , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : int=0.1 , lowerCAmelCase : Optional[int]=10 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : Any=3 , lowerCAmelCase : int=0.6 , lowerCAmelCase : Dict=None , ):
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = mask_ratio
lowerCAmelCase = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
        # (we add 1 for the [CLS] token)
lowerCAmelCase = (image_size // patch_size) ** 2
lowerCAmelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
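        # With the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91.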
def __lowercase ( self : Dict ):
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Tuple ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __lowercase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] ):
lowerCAmelCase = ViTMAEModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple ):
lowerCAmelCase = ViTMAEForPreTraining(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase )
lowerCAmelCase = (self.image_size // self.patch_size) ** 2
lowerCAmelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCAmelCase = 1
lowerCAmelCase = ViTMAEForPreTraining(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase = model(lowerCAmelCase )
lowerCAmelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _a , _a , unittest.TestCase ):
_a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_a = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_a = False
_a = False
_a = False
_a = False
def __lowercase ( self : Tuple ):
lowerCAmelCase = ViTMAEModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def __lowercase ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def __lowercase ( self : Union[str, Any] ):
pass
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def __lowercase ( self : List[Any] ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(lowerCAmelCase )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def __lowercase ( self : int ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase )
def __lowercase ( self : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
lowerCAmelCase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCAmelCase = torch.from_numpy(lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCAmelCase = pt_noise
super().check_pt_tf_models(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __lowercase ( self : Tuple ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
lowerCAmelCase = outputs[0].cpu().numpy()
lowerCAmelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase )
lowerCAmelCase = model_class.from_pretrained(lowerCAmelCase )
model.to(lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
# Make sure we don't have nans
lowerCAmelCase = after_outputs[0].cpu().numpy()
lowerCAmelCase = 0
lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def __lowercase ( self : Union[str, Any] ):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def __lowercase ( self : Union[str, Any] ):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def __lowercase ( self : int ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def __lowercase ( self : List[Any] ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowercase ( self : str ):
pass
@slow
def __lowercase ( self : Any ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = ViTMAEModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def lowercase () -> List[Any]:
'''simple docstring'''
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Union[str, Any] ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def __lowercase ( self : Dict ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCAmelCase = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(lowerCAmelCase )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
        # prepare a noise vector that will also be used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCAmelCase = ViTMAEConfig()
lowerCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCAmelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**lowerCAmelCase , noise=torch.from_numpy(lowerCAmelCase ).to(device=lowerCAmelCase ) )
# verify the logits
lowerCAmelCase = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
lowerCAmelCase = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCAmelCase ) , atol=1e-4 ) )
| 529
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]=13 , lowerCAmelCase : List[str]=30 , lowerCAmelCase : Dict=2 , lowerCAmelCase : Any=3 , lowerCAmelCase : str=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Dict=32 , lowerCAmelCase : Dict=5 , lowerCAmelCase : str=4 , lowerCAmelCase : List[Any]=37 , lowerCAmelCase : List[Any]="gelu" , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : Optional[int]=10 , lowerCAmelCase : List[str]=0.02 , lowerCAmelCase : List[str]=None , ):
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase = (image_size // patch_size) ** 2
lowerCAmelCase = num_patches + 1
def __lowercase ( self : List[str] ):
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Tuple ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __lowercase ( self : Any , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple ):
lowerCAmelCase = ViTMSNModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : int ):
lowerCAmelCase = self.type_sequence_label_size
lowerCAmelCase = ViTMSNForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase , labels=lowerCAmelCase )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase = 1
lowerCAmelCase = ViTMSNForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _a , _a , unittest.TestCase ):
_a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_a = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def __lowercase ( self : Tuple ):
lowerCAmelCase = ViTMSNModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def __lowercase ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __lowercase ( self : Optional[Any] ):
pass
def __lowercase ( self : int ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(lowerCAmelCase )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def __lowercase ( self : Optional[Any] ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = ViTMSNModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def lowercase () -> Dict:
'''simple docstring'''
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Dict ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __lowercase ( self : Tuple ):
torch.manual_seed(2 )
lowerCAmelCase = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(lowerCAmelCase )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**lowerCAmelCase )
# verify the logits
lowerCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
lowerCAmelCase = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
| 529
| 1
|
from manim import *
class UpperCamelCase ( __a ):
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_ : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCamelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCamelCase_ : Union[str, Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCamelCase_ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCamelCase_ : str = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCamelCase_ : List[str] = Text("""CPU""" , font_size=24 )
UpperCamelCase_ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCamelCase_ : List[str] = [mem.copy() for i in range(4 )]
UpperCamelCase_ : Union[str, Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCamelCase_ : Optional[Any] = Text("""GPU""" , font_size=24 )
UpperCamelCase_ : Tuple = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = [mem.copy() for i in range(6 )]
UpperCamelCase_ : Optional[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCamelCase_ : List[str] = Text("""Model""" , font_size=24 )
UpperCamelCase_ : int = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCamelCase_ : Optional[int] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCamelCase_ : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCamelCase_ : Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCamelCase_ : List[Any] = Text("""Loaded Checkpoint""" , font_size=24 )
UpperCamelCase_ : Optional[int] = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCamelCase_ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_ : List[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ : List[str] = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCamelCase_ : int = MarkupText(
f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCamelCase_ : str = []
UpperCamelCase_ : Tuple = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCamelCase_ : Dict = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCamelCase_ : Tuple = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 635
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
SCREAMING_SNAKE_CASE : List[str] = NewType("DataClass", Any)
SCREAMING_SNAKE_CASE : Optional[int] = NewType("DataClassType", Any)
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : str ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
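# Illustrative behavior of the parser above (used as `string_to_bool` below):
# "yes"/"True"/"1" -> True, "n"/"FALSE"/"0" -> False, and any other string
# raises an ArgumentTypeError.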
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : list ):
UpperCamelCase_ : Tuple = {str(_SCREAMING_SNAKE_CASE ): choice for choice in choices}
return lambda _SCREAMING_SNAKE_CASE : str_to_choice.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( *,
_SCREAMING_SNAKE_CASE : Union[str, List[str]] = None , _SCREAMING_SNAKE_CASE : str = None , _SCREAMING_SNAKE_CASE : Any = dataclasses.MISSING , _SCREAMING_SNAKE_CASE : Callable[[], Any] = dataclasses.MISSING , _SCREAMING_SNAKE_CASE : dict = None , **_SCREAMING_SNAKE_CASE : List[str] , ):
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
UpperCamelCase_ : Union[str, Any] = {}
if aliases is not None:
UpperCamelCase_ : Optional[Any] = aliases
if help is not None:
UpperCamelCase_ : List[str] = help
return dataclasses.field(metadata=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , default_factory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
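# Minimal usage sketch (hypothetical dataclass; the field helper above is
# exposed as `HfArg` in recent transformers versions):
#   @dataclasses.dataclass
#   class TrainingArgs:
#       learning_rate: float = HfArg(default=5e-5, aliases=["--lr"], help="Peak learning rate.")
# `aliases` and `help` are stored in `field.metadata`, which the parser reads
# when it builds the matching argparse argument.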
class UpperCamelCase ( __a ):
a__ :Iterable[DataClassType]
def __init__(self , __UpperCamelCase , **__UpperCamelCase ) -> Optional[int]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
UpperCamelCase_ : int = ArgumentDefaultsHelpFormatter
super().__init__(**__UpperCamelCase )
if dataclasses.is_dataclass(__UpperCamelCase ):
UpperCamelCase_ : str = [dataclass_types]
UpperCamelCase_ : Optional[int] = list(__UpperCamelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__UpperCamelCase )
@staticmethod
def A_ (__UpperCamelCase , __UpperCamelCase ) -> List[str]:
UpperCamelCase_ : Any = f'''--{field.name}'''
UpperCamelCase_ : int = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __UpperCamelCase ):
raise RuntimeError(
"""Unresolved type detected, which should have been done with the help of """
"""`typing.get_type_hints` method by default""" )
UpperCamelCase_ : Any = kwargs.pop("""aliases""" , [] )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : str = [aliases]
UpperCamelCase_ : Optional[int] = getattr(field.type , """__origin__""" , field.type )
if origin_type is Union or (hasattr(__UpperCamelCase , """UnionType""" ) and isinstance(__UpperCamelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__UpperCamelCase ) not in field.type.__args__
):
raise ValueError(
"""Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
""" the argument parser only supports one type per argument."""
f''' Problem encountered in field \'{field.name}\'.''' )
if type(__UpperCamelCase ) not in field.type.__args__:
# filter `str` in Union
UpperCamelCase_ : str = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
UpperCamelCase_ : Tuple = getattr(field.type , """__origin__""" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
UpperCamelCase_ : Optional[Any] = (
field.type.__args__[0] if isinstance(__UpperCamelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
UpperCamelCase_ : List[Any] = getattr(field.type , """__origin__""" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
UpperCamelCase_ : int = {}
if origin_type is Literal or (isinstance(field.type , __UpperCamelCase ) and issubclass(field.type , __UpperCamelCase )):
if origin_type is Literal:
UpperCamelCase_ : List[str] = field.type.__args__
else:
UpperCamelCase_ : Optional[Any] = [x.value for x in field.type]
UpperCamelCase_ : str = make_choice_type_function(kwargs["""choices"""] )
if field.default is not dataclasses.MISSING:
UpperCamelCase_ : Tuple = field.default
else:
UpperCamelCase_ : Optional[Any] = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
UpperCamelCase_ : Optional[int] = copy(__UpperCamelCase )
# Hack because type=bool in argparse does not behave as we want.
UpperCamelCase_ : Optional[Any] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if the field is of type bool and has no explicit default.
UpperCamelCase_ : List[str] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
UpperCamelCase_ : List[Any] = default
# This tells argparse we accept 0 or 1 value after --field_name
UpperCamelCase_ : Optional[Any] = """?"""
# This is the value that will get picked if we do --field_name (without value)
UpperCamelCase_ : Union[str, Any] = True
elif isclass(__UpperCamelCase ) and issubclass(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : Optional[Any] = field.type.__args__[0]
UpperCamelCase_ : str = """+"""
if field.default_factory is not dataclasses.MISSING:
UpperCamelCase_ : List[Any] = field.default_factory()
elif field.default is dataclasses.MISSING:
UpperCamelCase_ : Dict = True
else:
UpperCamelCase_ : List[str] = field.type
if field.default is not dataclasses.MISSING:
UpperCamelCase_ : List[Any] = field.default
elif field.default_factory is not dataclasses.MISSING:
UpperCamelCase_ : Optional[Any] = field.default_factory()
else:
UpperCamelCase_ : Optional[Any] = True
parser.add_argument(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
UpperCamelCase_ : str = False
parser.add_argument(f'''--no_{field.name}''' , action="""store_false""" , dest=field.name , **__UpperCamelCase )
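        # Illustration (hypothetical field): for `use_cache: bool = True`, the
        # parser accepts `--use_cache` (True) and `--no_use_cache` (False),
        # both writing to the same `use_cache` destination.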
def A_ (self , __UpperCamelCase ) -> Dict:
if hasattr(__UpperCamelCase , """_argument_group_name""" ):
UpperCamelCase_ : str = self.add_argument_group(dtype._argument_group_name )
else:
UpperCamelCase_ : List[str] = self
try:
UpperCamelCase_ : Dict[str, type] = get_type_hints(__UpperCamelCase )
except NameError:
raise RuntimeError(
f'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
"""removing line of `from __future__ import annotations` which opts in Postponed """
"""Evaluation of Annotations (PEP 563)""" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__UpperCamelCase ):
UpperCamelCase_ : Optional[Any] = """.""".join(map(__UpperCamelCase , sys.version_info[:3] ) )
raise RuntimeError(
f'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
"""line of `from __future__ import annotations` which opts in union types as """
"""`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
"""support Python versions that lower than 3.10, you need to use """
"""`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
"""`X | None`.""" ) from ex
raise
for field in dataclasses.fields(__UpperCamelCase ):
if not field.init:
continue
UpperCamelCase_ : Optional[Any] = type_hints[field.name]
self._parse_dataclass_field(__UpperCamelCase , __UpperCamelCase )
def A_ (self , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
UpperCamelCase_ : Optional[int] = []
if args_filename:
args_files.append(Path(__UpperCamelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
UpperCamelCase_ : List[str] = ArgumentParser()
args_file_parser.add_argument(__UpperCamelCase , type=__UpperCamelCase , action="""append""" )
# Use only remaining args for further parsing (remove the args_file_flag)
UpperCamelCase_,UpperCamelCase_ : Optional[Any] = args_file_parser.parse_known_args(args=__UpperCamelCase )
UpperCamelCase_ : str = vars(__UpperCamelCase ).get(args_file_flag.lstrip("""-""" ) , __UpperCamelCase )
if cmd_args_file_paths:
args_files.extend([Path(__UpperCamelCase ) for p in cmd_args_file_paths] )
UpperCamelCase_ : Optional[Any] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
UpperCamelCase_ : str = file_args + args if args is not None else file_args + sys.argv[1:]
UpperCamelCase_,UpperCamelCase_ : List[str] = self.parse_known_args(args=__UpperCamelCase )
UpperCamelCase_ : List[str] = []
for dtype in self.dataclass_types:
UpperCamelCase_ : Any = {f.name for f in dataclasses.fields(__UpperCamelCase ) if f.init}
UpperCamelCase_ : Optional[int] = {k: v for k, v in vars(__UpperCamelCase ).items() if k in keys}
for k in keys:
delattr(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ : List[str] = dtype(**__UpperCamelCase )
outputs.append(__UpperCamelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__UpperCamelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
def A_ (self , __UpperCamelCase , __UpperCamelCase = False ) -> Tuple[DataClass, ...]:
UpperCamelCase_ : Any = set(args.keys() )
UpperCamelCase_ : List[str] = []
for dtype in self.dataclass_types:
UpperCamelCase_ : str = {f.name for f in dataclasses.fields(__UpperCamelCase ) if f.init}
UpperCamelCase_ : int = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
UpperCamelCase_ : Dict = dtype(**__UpperCamelCase )
outputs.append(__UpperCamelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(__UpperCamelCase )}''' )
return tuple(__UpperCamelCase )
def A_ (self , __UpperCamelCase , __UpperCamelCase = False ) -> Tuple[DataClass, ...]:
with open(Path(__UpperCamelCase ) , encoding="""utf-8""" ) as open_json_file:
UpperCamelCase_ : Tuple = json.loads(open_json_file.read() )
UpperCamelCase_ : List[str] = self.parse_dict(__UpperCamelCase , allow_extra_keys=__UpperCamelCase )
return tuple(__UpperCamelCase )
def A_ (self , __UpperCamelCase , __UpperCamelCase = False ) -> Tuple[DataClass, ...]:
UpperCamelCase_ : Dict = self.parse_dict(yaml.safe_load(Path(__UpperCamelCase ).read_text() ) , allow_extra_keys=__UpperCamelCase )
return tuple(__UpperCamelCase )
| 635
| 1
|
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Count the ways `pence` pence can be made from standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
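# Worked example: with pence = 5 only the coins [1, 2, 5] contribute, and the
# table ends with number_of_ways[5] = 4, i.e. 1+1+1+1+1, 1+1+1+2, 1+2+2 and 5.
# Iterating over coins in the outer loop counts combinations, not ordered
# sequences.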
if __name__ == "__main__":
assert solution(2_0_0) == 7_3_6_8_2
| 239
|
"""simple docstring"""
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum the numbers on both diagonals of an n x n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # The ring with side length `odd` has corners odd**2, odd**2 - (odd - 1),
        # odd**2 - 2 * (odd - 1) and odd**2 - 3 * (odd - 1); their sum is
        # 4 * odd**2 - 6 * (odd - 1) = 4 * odd**2 - 6 * even.
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 239
| 1
|
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def __UpperCAmelCase ( __UpperCamelCase ):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__lowercase : str = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__lowercase : Union[str, Any] = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__lowercase : Optional[Any] = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__lowercase : int = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__lowercase : str = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__lowercase : str = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__lowercase : List[Any] = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__lowercase : Union[str, Any] = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
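# Illustrative renames performed by the function above:
#   replace_key("prior.x_out.weight")   -> "prior.fc_proj_out.weight"
#   replace_key("vqvae.bottleneck.0.k") -> "vqvae.bottleneck.0.codebook"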
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : Union[str, Any] = {}
import re
__lowercase : Dict = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__lowercase : Any = re.compile(
R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__lowercase : Union[str, Any] = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__lowercase : List[Any] = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__lowercase : Dict = re.compile(
R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__lowercase : List[str] = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__lowercase : Optional[Any] = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__lowercase : Tuple = re.compile(
R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__lowercase : List[str] = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__UpperCamelCase ):
__lowercase : Any = re_encoder_block_conv_in.match(__UpperCamelCase )
__lowercase : int = regex_match.groups()
__lowercase : List[str] = int(groups[2] ) * 2 + int(groups[3] )
__lowercase : Optional[Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
__lowercase : Tuple = re_encoder_block_conv_in.sub(__UpperCamelCase , __UpperCamelCase )
elif re_encoder_block_resnet.fullmatch(__UpperCamelCase ):
__lowercase : List[Any] = re_encoder_block_resnet.match(__UpperCamelCase )
__lowercase : List[Any] = regex_match.groups()
__lowercase : Any = int(groups[2] ) * 2 + int(groups[3] )
__lowercase : Dict = {'''1''': 1, '''3''': 2}[groups[-2]]
__lowercase : Tuple = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
__lowercase : Tuple = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__lowercase : List[Any] = prefix + resnet_block
__lowercase : Any = re_encoder_block_resnet.sub(__UpperCamelCase , __UpperCamelCase )
elif re_encoder_block_proj_out.fullmatch(__UpperCamelCase ):
__lowercase : List[str] = re_encoder_block_proj_out.match(__UpperCamelCase )
__lowercase : Optional[int] = regex_match.groups()
__lowercase : Any = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
__lowercase : Any = re_encoder_block_proj_out.sub(__UpperCamelCase , __UpperCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__UpperCamelCase ):
__lowercase : List[Any] = re_decoder_block_conv_out.match(__UpperCamelCase )
__lowercase : Union[str, Any] = regex_match.groups()
__lowercase : str = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowercase : Optional[int] = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
__lowercase : Union[str, Any] = re_decoder_block_conv_out.sub(__UpperCamelCase , __UpperCamelCase )
elif re_decoder_block_resnet.fullmatch(__UpperCamelCase ):
__lowercase : Any = re_decoder_block_resnet.match(__UpperCamelCase )
__lowercase : int = regex_match.groups()
__lowercase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowercase : List[Any] = {'''1''': 1, '''3''': 2}[groups[-2]]
__lowercase : List[Any] = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
__lowercase : Union[str, Any] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__lowercase : List[Any] = prefix + resnet_block
__lowercase : List[str] = re_decoder_block_resnet.sub(__UpperCamelCase , __UpperCamelCase )
elif re_decoder_block_proj_in.fullmatch(__UpperCamelCase ):
__lowercase : str = re_decoder_block_proj_in.match(__UpperCamelCase )
__lowercase : List[Any] = regex_match.groups()
__lowercase : List[Any] = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
__lowercase : Tuple = re_decoder_block_proj_in.sub(__UpperCamelCase , __UpperCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__UpperCamelCase ):
__lowercase : Optional[Any] = re_prior_cond_conv_out.match(__UpperCamelCase )
__lowercase : Optional[int] = regex_match.groups()
__lowercase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowercase : Union[str, Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
__lowercase : Dict = re_prior_cond_conv_out.sub(__UpperCamelCase , __UpperCamelCase )
elif re_prior_cond_resnet.fullmatch(__UpperCamelCase ):
__lowercase : int = re_prior_cond_resnet.match(__UpperCamelCase )
__lowercase : int = regex_match.groups()
__lowercase : int = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowercase : List[str] = {'''1''': 1, '''3''': 2}[groups[-2]]
__lowercase : Optional[int] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
__lowercase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__lowercase : Dict = prefix + resnet_block
__lowercase : int = re_prior_cond_resnet.sub(__UpperCamelCase , __UpperCamelCase )
elif re_prior_cond_proj_in.fullmatch(__UpperCamelCase ):
__lowercase : Union[str, Any] = re_prior_cond_proj_in.match(__UpperCamelCase )
__lowercase : Optional[int] = regex_match.groups()
__lowercase : Union[str, Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
__lowercase : List[str] = re_prior_cond_proj_in.sub(__UpperCamelCase , __UpperCamelCase )
# keep original key
else:
__lowercase : Tuple = original_key
__lowercase : Any = replace_key(__UpperCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
__lowercase : Optional[int] = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
__lowercase : str = original_key
__lowercase : int = original_key
__lowercase : List[Any] = value
return new_dict
@torch.no_grad()
def __UpperCAmelCase ( __UpperCamelCase=None , __UpperCamelCase=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
__lowercase : Optional[int] = requests.get(f"""{PREFIX}{file}""" , allow_redirects=__UpperCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=__UpperCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , '''wb''' ).write(r.content )
__lowercase : str = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__lowercase : str = JukeboxConfig.from_pretrained(__UpperCamelCase )
__lowercase : Optional[int] = JukeboxModel(__UpperCamelCase )
__lowercase : List[Any] = []
__lowercase : List[str] = {}
for i, dict_name in enumerate(__UpperCamelCase ):
__lowercase : Dict = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )['''model''']
__lowercase : Dict = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__lowercase : Optional[int] = old_dic[k]
elif k.endswith('''.w''' ):
__lowercase : List[str] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__lowercase : str = old_dic[k]
else:
__lowercase : List[Any] = old_dic[k]
__lowercase : Dict = '''vqvae''' if i == 0 else f"""priors.{3 - i}"""
__lowercase : Tuple = fix_jukebox_keys(__UpperCamelCase , model.state_dict() , __UpperCamelCase , __UpperCamelCase )
weight_dict.append(__UpperCamelCase )
__lowercase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , '''w''' ) as txtfile:
json.dump(__UpperCamelCase , __UpperCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
return weight_dict
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 76
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
                 use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the last hidden state: (480 // 8) ** 2 = 3600 patches, plus one [CLS] token
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        # A small test to make sure that inference works in half precision without any problem.
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
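# A quick sanity check of the sequence-length arithmetic exercised by the tests above (a
# standalone sketch, not part of the test suite): a ViT sees (image_size // patch_size) ** 2
# patches plus one [CLS] token.
def vit_seq_length(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) ** 2 + 1

assert vit_seq_length(480, 8) == 3601  # matches the DINO interpolation test above
assert vit_seq_length(224, 16) == 197  # standard ViT-Base/16 input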
| 713
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12,
                 num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256,
                 max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320,
                 num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0,
                 eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with DDIM."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
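# A minimal usage sketch (the model id is an assumption; any unconditional UNet checkpoint
# whose scheduler config converts to DDIM works):
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]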
| 14
|
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """Perform a few sanity checks on the parsed arguments before launching the distillation."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """Freeze the positional embeddings of the student so they are not updated during distillation."""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    """Freeze the token type embeddings of the student (RoBERTa students only)."""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
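    # Example invocation (hypothetical paths; data files must be prepared beforehand as in the
    # research-project README):
    #   python train.py --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
    #       --teacher_type bert --teacher_name bert-base-uncased --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 \
    #       --data_file data/binarized_text.bert-base-uncased.pickle \
    #       --token_counts data/token_counts.bert-base-uncased.pickle --dump_path serialization_dir/my_first_training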
| 609
| 0
|
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
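# e.g. image_grid(images, rows=2, cols=2) tiles four equally sized PIL images into one 2x2 sheet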
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50,
                    num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 460
|
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
a_ : List[str] = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , A_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
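# A compact sketch of the contract exercised above (assumed semantics): inside the context manager
# every route to the target attribute resolves to the mock, and the originals are restored on exit.
#
#   mock = "__mock__"
#   with patch_submodule(_test_patching, "os.path.join", mock):
#       assert _test_patching.os.path.join is mock
#   assert _test_patching.os.path.join is not mock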
| 460
| 1
|
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
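    # Example launches (the script filename is an assumption; the flags are the ones defined above):
    #   python nlp_example.py --cpu
    #   accelerate launch nlp_example.py --mixed_precision fp16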
| 47
|
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 183
| 0
|
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide `number_of_bytes` into `partitions` contiguous, 1-indexed byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
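# e.g. allocation_num(100, 4) -> ['1-25', '26-50', '51-75', '76-100']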
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253
|
class EditDistance:
    """
    Computes the minimum edit (Levenshtein) distance between two strings,
    both top-down (memoized recursion) and bottom-up (tabulation).
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
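# e.g. both methods return 5 for ("intention", "execution")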
if __name__ == "__main__":
snake_case_ : Dict = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
snake_case_ : Any = input("Enter the first string: ").strip()
snake_case_ : Tuple = input("Enter the second string: ").strip()
print()
print(f"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}")
print(f"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}")
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 253
| 1
|
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
A__ : str = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
return values.split(''',''' )
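        # e.g. "DeprecationWarning,UserWarning" -> ["DeprecationWarning", "UserWarning"]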
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 353
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
    'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bert'] = [
        'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BertForMaskedLM',
        'BertForMultipleChoice',
        'BertForNextSentencePrediction',
        'BertForPreTraining',
        'BertForQuestionAnswering',
        'BertForSequenceClassification',
        'BertForTokenClassification',
        'BertLayer',
        'BertLMHeadModel',
        'BertModel',
        'BertPreTrainedModel',
        'load_tf_weights_in_bert',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_bert'] = [
        'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFBertEmbeddings',
        'TFBertForMaskedLM',
        'TFBertForMultipleChoice',
        'TFBertForNextSentencePrediction',
        'TFBertForPreTraining',
        'TFBertForQuestionAnswering',
        'TFBertForSequenceClassification',
        'TFBertForTokenClassification',
        'TFBertLMHeadModel',
        'TFBertMainLayer',
        'TFBertModel',
        'TFBertPreTrainedModel',
    ]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_tf'] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_bert'] = [
        'FlaxBertForCausalLM',
        'FlaxBertForMaskedLM',
        'FlaxBertForMultipleChoice',
        'FlaxBertForNextSentencePrediction',
        'FlaxBertForPreTraining',
        'FlaxBertForQuestionAnswering',
        'FlaxBertForSequenceClassification',
        'FlaxBertForTokenClassification',
        'FlaxBertModel',
        'FlaxBertPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 396
| 0
|
'''simple docstring'''
def power(base: int, exponent: int) -> float:
    """Compute base**exponent recursively (for exponent >= 0)."""
    return base * power(base, (exponent - 1)) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(F'{base} to the power of {exponent} is {result}')
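    # e.g. entering base=2 and exponent=-3 prints: 2 to the power of -3 is 0.125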
| 419
|
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending any leftover tail."""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
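    # Expected output (added note): "AXBYZ" -- characters are interleaved,
    # then the tail of the longer string is appended.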
| 419
| 1
|
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


# Names `load_vqgan` and `reconstruct_with_vqgan` are restored guesses; they
# are not referenced elsewhere in this snippet.
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
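# Usage sketch (added; assumes the default checkpoint paths above exist):
#   model = load_vqgan("cuda")
#   xrec = reconstruct_with_vqgan(x, model)  # x: (B, 3, H, W) image batch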
| 95
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTImageProcessorTester(unittest.TestCase):
def __init__( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=13 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Union[str, Any]=224 , lowerCAmelCase_ : List[Any]=30 , lowerCAmelCase_ : Any=400 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Any=[0.5, 0.5, 0.5] , lowerCAmelCase_ : str=[0.5, 0.5, 0.5] , ) -> Dict:
UpperCAmelCase_ : int = size if size is not None else {"height": 18, "width": 18}
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : Optional[int] = num_channels
UpperCAmelCase_ : Dict = image_size
UpperCAmelCase_ : Union[str, Any] = min_resolution
UpperCAmelCase_ : List[str] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Optional[int] = size
UpperCAmelCase_ : List[str] = do_normalize
UpperCAmelCase_ : List[Any] = image_mean
UpperCAmelCase_ : Dict = image_std
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class ViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
__magic_name__ = ViTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = ViTImageProcessorTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
return self.image_proc_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "size" ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ : str = image_processor(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# Initialize image_processor
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
UpperCAmelCase_ : str = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ : Dict = image_processor(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
# Initialize image_processor
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ : Optional[int] = image_processor(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 95
| 1
|
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    '''simple docstring'''
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
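# Note (added): the loop above is the classic max_id pagination pattern for
# the legacy Twitter REST timeline endpoint: each request asks only for
# tweets strictly older than the oldest id seen so far, until a page comes
# back empty.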
| 707
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta: float = 0.999, alpha_transform_type: str = "cosine") -> torch.Tensor:
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
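# Note (added): this helper discretizes a continuous alpha_bar(t) schedule
# into per-step betas via beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i),
# clipped at max_beta. Minimal usage sketch:
#   betas = betas_for_alpha_bar(1000)                    # cosine schedule
#   alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)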
class SCREAMING_SNAKE_CASE_ (SchedulerMixin, ConfigMixin):
'''simple docstring'''
__magic_name__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
__magic_name__ : List[Any] = 2
@register_to_config
def __init__( self , lowerCamelCase__ = 1_000 , lowerCamelCase__ = 0.0_00_85 , lowerCamelCase__ = 0.0_12 , lowerCamelCase__ = "linear" , lowerCamelCase__ = None , lowerCamelCase__ = "epsilon" , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 1.0 , lowerCamelCase__ = "linspace" , lowerCamelCase__ = 0 , ) -> Tuple:
'''simple docstring'''
if trained_betas is not None:
snake_case__ : Dict = torch.tensor(lowerCamelCase__ , dtype=torch.floataa)
elif beta_schedule == "linear":
snake_case__ : Any = torch.linspace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case__ : Dict = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase__ , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case__ : str = betas_for_alpha_bar(lowerCamelCase__ , alpha_transform_type="cosine")
elif beta_schedule == "exp":
snake_case__ : Optional[int] = betas_for_alpha_bar(lowerCamelCase__ , alpha_transform_type="exp")
else:
raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""")
snake_case__ : Dict = 1.0 - self.betas
snake_case__ : List[Any] = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__)
snake_case__ : Tuple = use_karras_sigmas
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__=None) -> Optional[int]:
'''simple docstring'''
if schedule_timesteps is None:
snake_case__ : Optional[Any] = self.timesteps
snake_case__ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
snake_case__ : Optional[Any] = 1 if len(lowerCamelCase__) > 1 else 0
else:
snake_case__ : Optional[int] = timestep.cpu().item() if torch.is_tensor(lowerCamelCase__) else timestep
snake_case__ : List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , ) -> torch.FloatTensor:
'''simple docstring'''
snake_case__ : Dict = self.index_for_timestep(lowerCamelCase__)
snake_case__ : int = self.sigmas[step_index]
snake_case__ : int = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> int:
'''simple docstring'''
snake_case__ : int = num_inference_steps
snake_case__ : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
snake_case__ : int = np.linspace(0 , num_train_timesteps - 1 , lowerCamelCase__ , dtype=lowerCamelCase__)[::-1].copy()
elif self.config.timestep_spacing == "leading":
snake_case__ : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case__ : Any = (np.arange(0 , lowerCamelCase__) * step_ratio).round()[::-1].copy().astype(lowerCamelCase__)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
snake_case__ : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case__ : int = (np.arange(lowerCamelCase__ , 0 , -step_ratio)).round().copy().astype(lowerCamelCase__)
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""")
snake_case__ : Optional[int] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
snake_case__ : Optional[int] = np.log(lowerCamelCase__)
snake_case__ : Dict = np.interp(lowerCamelCase__ , np.arange(0 , len(lowerCamelCase__)) , lowerCamelCase__)
if self.config.use_karras_sigmas:
snake_case__ : Union[str, Any] = self._convert_to_karras(in_sigmas=lowerCamelCase__ , num_inference_steps=self.num_inference_steps)
snake_case__ : Optional[Any] = np.array([self._sigma_to_t(lowerCamelCase__ , lowerCamelCase__) for sigma in sigmas])
snake_case__ : Optional[Any] = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
snake_case__ : Dict = torch.from_numpy(lowerCamelCase__).to(device=lowerCamelCase__)
snake_case__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
snake_case__ : str = torch.from_numpy(lowerCamelCase__)
snake_case__ : int = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
if str(lowerCamelCase__).startswith("mps"):
# mps does not support float64
snake_case__ : Tuple = timesteps.to(lowerCamelCase__ , dtype=torch.floataa)
else:
snake_case__ : Dict = timesteps.to(device=lowerCamelCase__)
# empty dt and derivative
snake_case__ : int = None
snake_case__ : Optional[int] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
snake_case__ : int = defaultdict(lowerCamelCase__)
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = np.log(lowerCamelCase__)
# get distribution
snake_case__ : int = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
snake_case__ : Optional[Any] = np.cumsum((dists >= 0) , axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
snake_case__ : List[str] = low_idx + 1
snake_case__ : Optional[Any] = log_sigmas[low_idx]
snake_case__ : Tuple = log_sigmas[high_idx]
# interpolate sigmas
snake_case__ : List[Any] = (low - log_sigma) / (low - high)
snake_case__ : Any = np.clip(lowerCamelCase__ , 0 , 1)
# transform interpolation to time range
snake_case__ : Optional[Any] = (1 - w) * low_idx + w * high_idx
snake_case__ : Dict = t.reshape(sigma.shape)
return t
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__) -> torch.FloatTensor:
'''simple docstring'''
snake_case__ : float = in_sigmas[-1].item()
snake_case__ : float = in_sigmas[0].item()
snake_case__ : Dict = 7.0 # 7.0 is the value used in the paper
snake_case__ : Any = np.linspace(0 , 1 , lowerCamelCase__)
snake_case__ : Dict = sigma_min ** (1 / rho)
snake_case__ : Tuple = sigma_max ** (1 / rho)
snake_case__ : Any = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return self.dt is None
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
snake_case__ : str = self.index_for_timestep(lowerCamelCase__)
# advance index counter by 1
snake_case__ : int = timestep.cpu().item() if torch.is_tensor(lowerCamelCase__) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
snake_case__ : str = self.sigmas[step_index]
snake_case__ : Any = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
snake_case__ : Dict = self.sigmas[step_index - 1]
snake_case__ : List[str] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
snake_case__ : str = 0
snake_case__ : Any = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
snake_case__ : int = sigma_hat if self.state_in_first_order else sigma_next
snake_case__ : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
snake_case__ : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_next
snake_case__ : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
snake_case__ : Union[str, Any] = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""")
if self.config.clip_sample:
snake_case__ : Union[str, Any] = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range)
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
snake_case__ : Union[str, Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
snake_case__ : Any = sigma_next - sigma_hat
# store for 2nd order step
snake_case__ : List[Any] = derivative
snake_case__ : Dict = dt
snake_case__ : List[Any] = sample
else:
# 2. 2nd order / Heun's method
snake_case__ : str = (sample - pred_original_sample) / sigma_next
snake_case__ : Union[str, Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
snake_case__ : List[str] = self.dt
snake_case__ : Optional[Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
snake_case__ : Optional[Any] = None
snake_case__ : Any = None
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase__)
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> torch.FloatTensor:
'''simple docstring'''
snake_case__ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase__):
# mps does not support float64
snake_case__ : List[str] = self.timesteps.to(original_samples.device , dtype=torch.floataa)
snake_case__ : Dict = timesteps.to(original_samples.device , dtype=torch.floataa)
else:
snake_case__ : Dict = self.timesteps.to(original_samples.device)
snake_case__ : Optional[Any] = timesteps.to(original_samples.device)
snake_case__ : Tuple = [self.index_for_timestep(lowerCamelCase__ , lowerCamelCase__) for t in timesteps]
snake_case__ : Union[str, Any] = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
snake_case__ : Tuple = sigma.unsqueeze(-1)
snake_case__ : Optional[int] = original_samples + noise * sigma
return noisy_samples
def __len__( self) -> str:
'''simple docstring'''
return self.config.num_train_timesteps
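# Note (added): _convert_to_karras above implements the sigma schedule of
# Karras et al. (2022), eq. (5):
#   sigma_i = (sigma_max^(1/rho) + i/(n-1) * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho
# with rho = 7, interpolating from sigma_max down to sigma_min in
# "inverse-rho" space.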
| 150
| 0
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = MBartConfig
__A = {}
__A = """gelu"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=20 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = bos_token_id
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case_ = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case_ = prepare_mbart_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = TFMBartModel(config=__UpperCamelCase ).get_decoder()
snake_case_ = inputs_dict['input_ids']
snake_case_ = input_ids[:1, :]
snake_case_ = inputs_dict['attention_mask'][:1, :]
snake_case_ = inputs_dict['head_mask']
snake_case_ = 1
# first forward pass
snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
snake_case_ , snake_case_ = outputs.to_tuple()
snake_case_ = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    '''simple docstring'''
if attention_mask is None:
snake_case_ = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE (TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__A = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__A = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__A = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__A = True
__A = False
__A = False
def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
    """simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFMBartModelTester(self )
snake_case_ = ConfigTester(self , config_class=__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
__A = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
__A = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
__A = """facebook/mbart-large-en-ro"""
@cached_property
def __lowerCAmelCase ( self ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __lowerCAmelCase ( self , **__UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.translate_src_text(**__UpperCamelCase )
self.assertListEqual(self.expected_text , __UpperCamelCase )
def __lowerCAmelCase ( self , **__UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.tokenizer(self.src_text , **__UpperCamelCase , return_tensors='tf' )
snake_case_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
snake_case_ = self.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
return generated_words
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 187
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    '''simple docstring'''
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""")
            in_proj_bias = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""")
            # next, add query, keys and values (in that order) to the state dict;
            # target key names follow the HF Swin layout used in create_rename_keys above
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim:, :]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):  # name restored by analogy with the reverse_* helper below
    '''simple docstring'''
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def reverse_correct_unfold_reduction_order(x):
    '''simple docstring'''
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def correct_unfold_norm_order(x):  # name restored by analogy with the reverse_* helper below
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def reverse_correct_unfold_norm_order(x):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
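# Note (added): the [0, 2, 1, 3] permutations above undo the channel
# interleaving that Swin's patch-merging ("unfold") layers use in the original
# mmsegmentation checkpoints, so the downsample weights line up with the HF
# Swin layout before load_state_dict.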
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    '''simple docstring'''
snake_case_ = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
snake_case_ = model_name_to_url[model_name]
snake_case_ = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' , file_name=lowercase__ )[
'state_dict'
]
for name, param in state_dict.items():
print(lowercase__ , param.shape )
snake_case_ = get_upernet_config(lowercase__ )
snake_case_ = UperNetForSemanticSegmentation(lowercase__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
snake_case_ = state_dict.pop(lowercase__ )
if "bn" in key:
snake_case_ = key.replace('bn' , 'batch_norm' )
snake_case_ = val
# rename keys
snake_case_ = create_rename_keys(lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
snake_case_ = reverse_correct_unfold_reduction_order(lowercase__ )
if "norm" in key:
snake_case_ = reverse_correct_unfold_norm_order(lowercase__ )
model.load_state_dict(lowercase__ )
# verify on image
snake_case_ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
snake_case_ = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
snake_case_ = SegformerImageProcessor()
snake_case_ = processor(lowercase__ , return_tensors='pt' ).pixel_values
with torch.no_grad():
snake_case_ = model(lowercase__ )
snake_case_ = outputs.logits
print(logits.shape )
print('First values of logits:' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
snake_case_ = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
snake_case_ = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
snake_case_ = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
snake_case_ = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase__ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowercase__ )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
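# Example invocation (added; the script file name is hypothetical):
#   python convert_upernet_swin.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny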
| 187
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:  # name assumed
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated towards zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
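# Example (added): evaluate_postfix(["2", "1", "+", "3", "*"]) evaluates
# (2 + 1) * 3 and returns 9.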
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = DanceDiffusionPipeline
lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowerCamelCase = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> List[Any]:
torch.manual_seed(0 )
A_ : Tuple = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_lowerCamelCase , use_timestep_embedding=_lowerCamelCase , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
A_ : Optional[int] = IPNDMScheduler()
A_ : Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=0 ) -> List[str]:
if str(_lowerCamelCase ).startswith("""mps""" ):
A_ : Union[str, Any] = torch.manual_seed(_lowerCamelCase )
else:
A_ : int = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
A_ : Any = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Any = self.get_dummy_components()
A_ : Any = DanceDiffusionPipeline(**_lowerCamelCase )
A_ : List[Any] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : Tuple = self.get_dummy_inputs(_lowerCamelCase )
A_ : Union[str, Any] = pipe(**_lowerCamelCase )
A_ : Union[str, Any] = output.audios
A_ : Any = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
A_ : str = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase_ ( self ) -> Tuple:
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase_ ( self ) -> List[Any]:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return super().test_save_load_optional_components()
@skip_mps
def UpperCAmelCase_ ( self ) -> Optional[int]:
return super().test_attention_slicing_forward_pass()
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[Any] = torch_device
A_ : str = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
A_ : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : Dict = pipe(generator=_lowerCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
A_ : Any = output.audios
A_ : str = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A_ : List[str] = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ) -> str:
A_ : Union[str, Any] = torch_device
A_ : List[str] = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
A_ : Dict = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : Dict = torch.manual_seed(0 )
A_ : Tuple = pipe(generator=_lowerCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
A_ : Dict = output.audios
A_ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A_ : Union[str, Any] = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 385
| 0
|
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__A = """scheduler_config.json"""
class FlaxKarrasDiffusionSchedulers(Enum):
"""simple docstring"""
__magic_name__ :str = 1
__magic_name__ :Optional[Any] = 2
__magic_name__ :Optional[Any] = 3
__magic_name__ :List[Any] = 4
__magic_name__ :int = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
"""simple docstring"""
__magic_name__ :jnp.ndarray
class FlaxSchedulerMixin:
"""simple docstring"""
__magic_name__ :Tuple = SCHEDULER_CONFIG_NAME
__magic_name__ :Dict = ["""dtype"""]
__magic_name__ :str = []
__magic_name__ :Tuple = True
@classmethod
def snake_case ( cls , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[str] = cls.load_config(
pretrained_model_name_or_path=__UpperCAmelCase , subfolder=__UpperCAmelCase , return_unused_kwargs=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = cls.from_config(__UpperCAmelCase , return_unused_kwargs=__UpperCAmelCase , **__UpperCAmelCase )
if hasattr(__UpperCAmelCase , 'create_state' ) and getattr(__UpperCAmelCase , 'has_state' , __UpperCAmelCase ):
lowerCAmelCase__ :Optional[Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = False , **__UpperCAmelCase ):
'''simple docstring'''
self.save_config(save_directory=__UpperCAmelCase , push_to_hub=__UpperCAmelCase , **__UpperCAmelCase )
@property
def snake_case ( self ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def snake_case ( cls ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = list(set([cls.__name__] + cls._compatibles ) )
lowerCAmelCase__ :Any = importlib.import_module(__name__.split('.' )[0] )
lowerCAmelCase__ :Union[str, Any] = [
getattr(__UpperCAmelCase , __UpperCAmelCase ) for c in compatible_classes_str if hasattr(__UpperCAmelCase , __UpperCAmelCase )
]
return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """simple docstring"""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta: float = 0.999, dtype=jnp.float32) -> jnp.ndarray:
    """simple docstring"""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
"""simple docstring"""
__magic_name__ :jnp.ndarray
__magic_name__ :jnp.ndarray
__magic_name__ :jnp.ndarray
@classmethod
def snake_case ( cls , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = scheduler.config
if config.trained_betas is not None:
lowerCAmelCase__ :Union[str, Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowerCAmelCase__ :Tuple = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ :Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ :str = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" )
lowerCAmelCase__ :Optional[Any] = 1.0 - betas
lowerCAmelCase__ :Union[str, Any] = jnp.cumprod(__UpperCAmelCase , axis=0 )
return cls(
alphas=__UpperCAmelCase , betas=__UpperCAmelCase , alphas_cumprod=__UpperCAmelCase , )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    """simple docstring"""
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):  # name assumed
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state, sample, noise, timesteps):  # name assumed
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
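# Note (added): `get_velocity_common` computes the v-prediction target of
# Salimans & Ho (2022), "Progressive Distillation for Fast Sampling of
# Diffusion Models": v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0,
# using the same sqrt factors as `add_noise_common` above.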
| 93
|
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    """simple docstring"""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def decimal_to_binary(number: str) -> str:  # wrapper name assumed
    """simple docstring"""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
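# Examples (added): decimal_to_binary("0") -> "0b0",
# decimal_to_binary("-19") -> "-0b10011".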
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93
| 1
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
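
# --- Editor's addition: a minimal sketch (not part of the original test file)
# of driving the tester above by hand; a plain TestCase instance stands in for
# the `parent` argument that receives the assertions.
def _example_run_funnel_checks():
    tester = TFFunnelModelTester(unittest.TestCase())
    tester.create_and_check_model(*tester.prepare_config_and_inputs())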
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
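
# --- Editor's addition, not part of the original test: a common follow-up is
# mapping the argmax logit to a human-readable RVL-CDIP class via the config.
def _example_predicted_label(model, logits):
    return model.config.id2label[logits.argmax(-1).item()]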
import os
import pickle
import unittest

from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    JumanppTokenizer,
    MecabTokenizer,
    SudachiTokenizer,
    WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi

from ...test_tokenization_common import TokenizerTesterMixin


@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lowercase(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lowercase(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lowercase(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
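
# --- Editor's addition: a minimal usage sketch (not part of the test module),
# assuming the optional MeCab dependencies (fugashi, ipadic) are installed.
def _example_bert_japanese_usage():
    tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
    # MeCab word segmentation followed by WordPiece subword splitting.
    return tokenizer.tokenize("こんにちは、世界。")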
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
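
# --- Editor's addition: a minimal usage sketch, not part of the original test
# file. It shows the pattern the tests above verify: resolving an activation
# module from its config-string name and applying it to a tensor.
def _example_get_activation_usage():
    act_fn = get_activation("gelu")
    return act_fn(torch.randn(2, 8))  # output keeps the input shape, (2, 8)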
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
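
# --- Editor's addition: illustrative usage sketch, not part of the original
# module. PipelineTool subclasses are callable, but the exact invocation
# surface may differ between transformers versions; treat this as an
# assumption, not documented API.
def _example_text_reader_usage():
    tool = TextToSpeechTool()
    return tool("Hello, world!")  # waveform produced by SpeechT5 + HiFi-GAN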
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
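
# --- Editor's addition: a minimal usage sketch, not part of the original
# configuration module. It shows how out_features and out_indices are kept
# aligned with the generated stage names.
def _example_focalnet_config_usage():
    config = FocalNetConfig(out_features=["stage2", "stage4"])
    # out_indices is derived automatically from the requested stage names.
    return config.out_features, config.out_indices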
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
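
# --- Editor's addition: a minimal usage sketch of the API exercised above,
# not part of the original test module; the archive path is hypothetical.
def _example_cached_path_usage():
    download_config = DownloadConfig(cache_dir="/tmp/datasets_cache", extract_compressed_file=True)
    # Returns the local path of the (extracted) file inside the cache dir.
    return cached_path("/path/to/archive.txt.gz", download_config=download_config)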
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
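
# --- Editor's addition: a minimal usage sketch of the API tested above, not
# part of the original test module.
def _example_from_list_usage():
    records = [{"col_1": 1, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
    dset = Dataset.from_list(records)
    return dset.column_names  # ["col_1", "col_2"]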
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
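
# --- Editor's addition: illustrative note, not part of the original __init__.
# _LazyModule defers the heavy submodule imports until an attribute is first
# touched, so, for example, the configuration module only loads on access:
def _example_lazy_access():
    import transformers

    return transformers.models.layoutlmv3.LayoutLMv3Config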
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
a_ = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : Optional[int] = {}
state_dict.pop('''pixel_mean''' , __UpperCamelCase )
state_dict.pop('''pixel_std''' , __UpperCamelCase )
__lowercase : List[str] = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__lowercase : Any = key.replace(__UpperCamelCase , __UpperCamelCase )
if re.match(__UpperCamelCase , __UpperCamelCase ):
__lowercase : Union[str, Any] = int(re.match(__UpperCamelCase , __UpperCamelCase ).group(2 ) )
if layer_nb == 0:
__lowercase : Union[str, Any] = key.replace('''layers.0''' , '''proj_in''' )
elif layer_nb == 1:
__lowercase : List[Any] = key.replace('''layers.1''' , '''layers.0''' )
elif layer_nb == 2:
__lowercase : int = key.replace('''layers.2''' , '''proj_out''' )
__lowercase : Tuple = value
__lowercase : int = model_state_dict[
'''prompt_encoder.shared_embedding.positional_embedding'''
]
return model_state_dict
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="ybelkada/segment-anything" ):
__lowercase : int = hf_hub_download(__UpperCamelCase , f"""checkpoints/{model_name}.pth""" )
if "sam_vit_b" in model_name:
__lowercase : int = SamConfig()
elif "sam_vit_l" in model_name:
__lowercase : Dict = SamVisionConfig(
hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
__lowercase : List[Any] = SamConfig(
vision_config=__UpperCamelCase , )
elif "sam_vit_h" in model_name:
__lowercase : Dict = SamVisionConfig(
hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
__lowercase : List[str] = SamConfig(
vision_config=__UpperCamelCase , )
__lowercase : List[Any] = torch.load(__UpperCamelCase , map_location='''cpu''' )
__lowercase : List[Any] = replace_keys(__UpperCamelCase )
__lowercase : Optional[Any] = SamImageProcessor()
__lowercase : Union[str, Any] = SamProcessor(image_processor=__UpperCamelCase )
__lowercase : List[Any] = SamModel(__UpperCamelCase )
hf_model.load_state_dict(__UpperCamelCase )
__lowercase : Dict = hf_model.to('''cuda''' )
__lowercase : List[str] = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
__lowercase : List[str] = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert('''RGB''' )
__lowercase : Union[str, Any] = [[[4_00, 6_50]]]
__lowercase : List[str] = [[1]]
__lowercase : Optional[int] = processor(images=np.array(__UpperCamelCase ) , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
__lowercase : Dict = hf_model(**__UpperCamelCase )
__lowercase : List[str] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
__lowercase : Optional[int] = processor(
images=np.array(__UpperCamelCase ) , input_points=__UpperCamelCase , input_labels=__UpperCamelCase , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
__lowercase : str = hf_model(**__UpperCamelCase )
__lowercase : Optional[Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
__lowercase : List[str] = ((75, 2_75, 17_25, 8_50),)
__lowercase : int = processor(images=np.array(__UpperCamelCase ) , input_boxes=__UpperCamelCase , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
__lowercase : str = hf_model(**__UpperCamelCase )
__lowercase : Optional[Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
__lowercase : Optional[Any] = [[[4_00, 6_50], [8_00, 6_50]]]
__lowercase : Union[str, Any] = [[1, 1]]
__lowercase : str = processor(
images=np.array(__UpperCamelCase ) , input_points=__UpperCamelCase , input_labels=__UpperCamelCase , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
__lowercase : Optional[int] = hf_model(**__UpperCamelCase )
__lowercase : Any = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
a_ = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
a_ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
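
# --- Editor's addition: example invocation (illustrative; the dump path is a
# hypothetical placeholder). A CUDA device is required because the script
# moves the model and inputs to "cuda".
def _example_sam_conversion():
    convert_sam_checkpoint("sam_vit_b_01ec64", "./sam-vit-base", False)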
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the trax model pickle (.pkl) file to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
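    # Example invocation (paths are illustrative):
    #   python convert_reformer_trax_checkpoint_to_pytorch.py \
    #       --trax_model_pkl_path ./model.pkl --config_file ./config.json \
    #       --pytorch_dump_path ./pytorch_model.bin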
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
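# Example: one generation flips the blinker from a vertical to a horizontal bar, i.e.
# new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]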
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                # prepend the CLS token manually, since `add_special_tokens=False` skips it
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # When `prompt=None`, GIT sets `input_ids` to None in `preprocess`; in batch mode these get
        # grouped into a list of Nones, which must be collapsed back to a single None before `generate`.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
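# Minimal usage sketch (the checkpoint id is an assumption; any image-to-text checkpoint works):
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#   # -> [{'generated_text': ...}]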
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
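# Example (requires network access; the title is what Open Library currently returns and may change):
#   summarize_book(get_openlibrary_data("isbn/0140328726"))["Title"]  # -> "Matilda"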
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping, return_length=return_length,
                verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
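# Minimal usage sketch (checkpoint id is an assumption; `image` is any PIL image):
#   from transformers import FlavaProcessor
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")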
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
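# Reading of the parametrization above: `is_small_dataset` returns True only when a positive
# IN_MEMORY_MAX_SIZE is configured and the dataset size is known and strictly smaller than it;
# with the default IN_MEMORY_MAX_SIZE of 0 it always returns False.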
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        # We evaluate on the dev set to compare against benchmarks without submitting to the GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir is not provided, a folder will be generated in the working directory
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on the dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
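    # Example invocation (paths are illustrative; --data_dir, --model_name_or_path, --do_train and
    # --do_predict are assumed to come from the shared lightning_base/BaseTransformer argument helpers):
    #   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
    #       --model_name_or_path bert-base-cased --output_dir ./results/mrpc --gpus 1 --do_train --do_predict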
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
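# Note on the RoPE scaling test above: with factor=10.0, "linear" scaling alters the rotary
# embeddings immediately (so even short-input outputs differ from the unscaled model), while
# "dynamic" scaling only activates once the input exceeds the original max_position_embeddings,
# hence the asymmetric allclose assertions.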
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
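# Minimal usage sketch:
#   tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#   tokenizer("hello world")["input_ids"]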
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
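    # Worked example: values [60, 100, 120], weights [10, 20, 30], capacity 50 takes
    # items 0 and 1 whole plus 2/3 of item 2:
    #   fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    #   -> (240.0, [1, 1, 0.6666666666666666])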
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
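    # Example invocation (script name and the GCS bucket are illustrative):
    #   python prepare_tfrecord_shards.py --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
    #       --split train --shard_size 1000 --output_dir gs://my-tfrecord-bucket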
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
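# --- Hedged usage sketch (added): the pipeline exercised above, outside of tests.
# The model is resolved automatically; decord must be installed for video decoding.
#
#   from transformers import pipeline
#   classifier = pipeline("video-classification")
#   predictions = classifier("archery.mp4", top_k=2)
#   # -> [{"score": ..., "label": ...}, {"score": ..., "label": ...}]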
| 604
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
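# --- Illustrative sketch (added): the semantics checked above, in plain NumPy.
# Simplified relative to `tf_top_k_top_p_filtering`: it ignores min_tokens_to_keep
# and assumes a 1-D logits vector with 0 < top_k <= len(logits).
def _numpy_top_k_top_p(logits, top_k: int, top_p: float):
    logits = np.asarray(logits, dtype=np.float64).copy()
    # top-k: mask everything strictly below the k-th largest logit
    kth_largest = np.sort(logits)[-top_k]
    logits[logits < kth_largest] = -np.inf
    # top-p: walk tokens in decreasing probability and keep the smallest prefix
    # whose cumulative softmax mass reaches top_p (the crossing token is kept)
    order = np.argsort(logits)[::-1]
    probs = np.exp(logits[order] - logits[order][0])
    probs /= probs.sum()
    cumulative = np.cumsum(probs)
    keep = np.empty(len(logits), dtype=bool)
    keep[0] = True  # the most likely token is always kept
    keep[1:] = cumulative[:-1] <= top_p
    logits[order[~keep]] = -np.inf
    return logits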
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
@slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
@require_tensorflow_text
    def test_saved_model_with_sentencepiece_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        sentence = "Hello, my dog is cute and"
        tokens = tokenizer(sentence, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A model accepting an extra, unused kwarg must produce the same output,
        # because the argument is filtered out of the encoder signature.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 604
| 1
|
"""simple docstring"""
from collections.abc import Callable
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> float:
"""simple docstring"""
UpperCamelCase = a
UpperCamelCase = b
if function(UpperCAmelCase_ ) == 0: # one of the a or b is a root for the function
return a
elif function(UpperCAmelCase_ ) == 0:
return b
elif (
function(UpperCAmelCase_ ) * function(UpperCAmelCase_ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("could not find root in given interval." )
else:
UpperCamelCase = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(UpperCAmelCase_ ) == 0:
return mid
elif function(UpperCAmelCase_ ) * function(UpperCAmelCase_ ) < 0:
UpperCamelCase = mid
else:
UpperCamelCase = mid
UpperCamelCase = start + (end - start) / 2.0
return mid
def lowerCamelCase__ ( UpperCAmelCase_ )-> float:
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
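# --- Worked example (added): f(1) = -6 and f(1000) > 0, so the interval [1, 1000]
# brackets a sign change and bisection converges to the real root of
# x**3 - 2*x - 5, approximately 2.0945515.
def _check_bisection_example() -> None:
    root = bisection(f, 1, 1000)
    assert abs(f(root)) < 1e-5  # root ≈ 2.0945515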
| 554
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 554
| 1
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 707
|
"""simple docstring"""
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase = False ) -> str:
'''simple docstring'''
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ =F'''Expected string as input, found {type(__lowerCAmelCase )}'''
raise ValueError(__lowerCAmelCase )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ =F'''Expected boolean as use_pascal parameter, found {type(__lowerCAmelCase )}'''
raise ValueError(__lowerCAmelCase )
lowerCamelCase__ =input_str.split("_" )
lowerCamelCase__ =0 if use_pascal else 1
lowerCamelCase__ =words[start_index:]
lowerCamelCase__ =[word[0].upper() + word[1:] for word in words_to_capitalize]
lowerCamelCase__ ="" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
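# --- Usage examples (added): expected behavior of snake_to_camel_case.
#   snake_to_camel_case("some_random_string")        -> "someRandomString"
#   snake_to_camel_case("some_random_string", True)  -> "SomeRandomString"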
| 132
| 0
|
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
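# --- Hedged usage sketch (added): registering and expanding a multi-vector
# placeholder. The checkpoint name is an assumption; any CLIPTokenizer works.
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   # "<cat-toy>" now maps to ["<cat-toy>_0", ..., "<cat-toy>_3"], and each
#   # occurrence in a prompt is replaced by those four tokens before encoding.
#   ids = tokenizer.encode("a photo of <cat-toy>")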
| 6
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
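# --- Note (added): `_LazyModule` defers the heavy submodule imports until an
# attribute is first accessed. A minimal standalone sketch of the same idea:
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module(f"{self.__name__}.{submodule}")
#                   return getattr(module, attr)
#           raise AttributeError(attr)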
| 140
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
):
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
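# --- Worked example (added): the corner <-> YOLO box conversions used above.
# YOLO stores (class, x_center, y_center, width, height) in relative coordinates;
# get_dataset converts to corners, and main converts back after the mosaic is built.
def _yolo_to_corners(x_c: float, y_c: float, w: float, h: float) -> tuple:
    return (x_c - w / 2, y_c - h / 2, x_c + w / 2, y_c + h / 2)


def _corners_to_yolo(xmin: float, ymin: float, xmax: float, ymax: float) -> tuple:
    w, h = xmax - xmin, ymax - ymin
    return (xmin + w / 2, ymin + h / 2, w, h)


# e.g. _yolo_to_corners(0.5, 0.5, 0.2, 0.4) == (0.4, 0.3, 0.6, 0.7)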
| 702
|
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def _a ( _lowercase : Any ):
'''simple docstring'''
if hor == 128:
__UpperCAmelCase : Optional[Any] = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
__UpperCAmelCase : Union[str, Any] = (32, 128, 256)
__UpperCAmelCase : str = ('''UpResnetBlock1D''', '''UpResnetBlock1D''')
elif hor == 32:
__UpperCAmelCase : Optional[Any] = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
__UpperCAmelCase : Any = (32, 64, 128, 256)
__UpperCAmelCase : Optional[int] = ('''UpResnetBlock1D''', '''UpResnetBlock1D''', '''UpResnetBlock1D''')
__UpperCAmelCase : Any = torch.load(F'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
__UpperCAmelCase : str = model.state_dict()
__UpperCAmelCase : int = {
'''down_block_types''': down_block_types,
'''block_out_channels''': block_out_channels,
'''up_block_types''': up_block_types,
'''layers_per_block''': 1,
'''use_timestep_embedding''': True,
'''out_block_type''': '''OutConv1DBlock''',
'''norm_num_groups''': 8,
'''downsample_each_block''': False,
'''in_channels''': 14,
'''out_channels''': 14,
'''extra_in_channels''': 0,
'''time_embedding_type''': '''positional''',
'''flip_sin_to_cos''': False,
'''freq_shift''': 1,
'''sample_size''': 65536,
'''mid_block_type''': '''MidResTemporalBlock1D''',
'''act_fn''': '''mish''',
}
__UpperCAmelCase : Optional[Any] = UNetaDModel(**_lowercase )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
__UpperCAmelCase : Union[str, Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
__UpperCAmelCase : List[Any] = state_dict.pop(_lowercase )
hf_value_function.load_state_dict(_lowercase )
torch.save(hf_value_function.state_dict() , F'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
with open(F'hub/hopper-medium-v2/unet/hor{hor}/config.json' , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
def _a ( ):
'''simple docstring'''
__UpperCAmelCase : str = {
'''in_channels''': 14,
'''down_block_types''': ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D'''),
'''up_block_types''': (),
'''out_block_type''': '''ValueFunction''',
'''mid_block_type''': '''ValueFunctionMidBlock1D''',
'''block_out_channels''': (32, 64, 128, 256),
'''layers_per_block''': 1,
'''downsample_each_block''': True,
'''sample_size''': 65536,
'''out_channels''': 14,
'''extra_in_channels''': 0,
'''time_embedding_type''': '''positional''',
'''use_timestep_embedding''': True,
'''flip_sin_to_cos''': False,
'''freq_shift''': 1,
'''norm_num_groups''': 8,
'''act_fn''': '''mish''',
}
__UpperCAmelCase : str = torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' )
__UpperCAmelCase : Optional[Any] = model
__UpperCAmelCase : Optional[int] = UNetaDModel(**_lowercase )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
__UpperCAmelCase : Union[str, Any] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
__UpperCAmelCase : Dict = state_dict.pop(_lowercase )
hf_value_function.load_state_dict(_lowercase )
torch.save(hf_value_function.state_dict() , '''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' )
with open('''hub/hopper-medium-v2/value_function/config.json''' , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
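# --- Note (added): the `dict(zip(...))` key mapping in both converters above
# relies on the source and target state dicts enumerating their parameters in the
# same order; keys are renamed positionally rather than matched by name.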
| 266
| 0
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights

    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
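    # --- Note (added): `classifier.predict` returns a sigmoid probability in
    # [0, 1], so the exact integer comparisons above only trigger at the extremes.
    # A more robust decision rule is to threshold at 0.5:
    #
    #   prediction = "Abnormality detected" if result[0][0] > 0.5 else "Normal"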
| 74
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
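# --- Hedged usage sketch (added): a predictor-corrector sampling loop with this
# scheduler, following the upstream diffusers API. `score_model` is a stand-in
# for any noise-conditioned score network; shapes and step counts are assumptions.
#
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   sample = torch.randn(shape) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           score = score_model(sample, t)
#           sample = scheduler.step_correct(score, sample).prev_sample
#       score = score_model(sample, t)
#       out = scheduler.step_pred(score, t, sample)
#       sample, sample_mean = out.prev_sample, out.prev_sample_mean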
| 225
| 0
|
from __future__ import annotations
def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: int
) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: int,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: int,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
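# --- Worked example (added): these functions return the interest earned, not
# principal plus interest. For instance, compounding 5% per period over 3 periods:
#   compound_interest(10000.0, 0.05, 3) == 10000 * (1.05**3 - 1) ≈ 1576.25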
| 73
|
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 73
| 1
|
def binary_insertion_sort(collection: list) -> list:
    """Pure implementation of the binary insertion sort algorithm in Python."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
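# --- Worked trace (added): binary_insertion_sort([5, 2, 4, 1])
#   i=1: binary search places 2 before 5  -> [2, 5, 4, 1]
#   i=2: 4 lands between 2 and 5          -> [2, 4, 5, 1]
#   i=3: 1 lands at the front             -> [1, 2, 4, 5]
# The binary search cuts comparisons to O(log i) per element, but the element
# shifts keep the overall worst case at O(n^2).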
| 541
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
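# --- Note (added): `collate_fn` pads every sentence in the batch to the longest
# one and builds a matching attention mask. For two sentences of lengths 3 and 5:
#   text_tensor -> [[t1, t2, t3, 0, 0], [u1, u2, u3, u4, u5]]
#   mask_tensor -> [[1,  1,  1,  0, 0], [1,  1,  1,  1,  1 ]]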
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
| 541
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
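# Typical usage: running `accelerate config` answers the prompts above interactively,
# while `accelerate config --config_file ./my_config.yaml` (path is a placeholder)
# also controls where the resulting YAML is written.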
| 217
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
    'facebook/deit-base-distilled-patch16-224': (
        'https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json'
    ),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class __snake_case ( __lowerCAmelCase ):
a__ = """deit"""
def __init__( self , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-12 , lowercase=2_24 , lowercase=16 , lowercase=3 , lowercase=True , lowercase=16 , **lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**lowercase)
a__: Optional[int] = hidden_size
a__: List[Any] = num_hidden_layers
a__: Optional[Any] = num_attention_heads
a__: Union[str, Any] = intermediate_size
a__: Optional[Any] = hidden_act
a__: Optional[Any] = hidden_dropout_prob
a__: List[Any] = attention_probs_dropout_prob
a__: List[str] = initializer_range
a__: Optional[int] = layer_norm_eps
a__: Dict = image_size
a__: Dict = patch_size
a__: List[Any] = num_channels
a__: Optional[Any] = qkv_bias
a__: List[Any] = encoder_stride
class __snake_case ( __lowerCAmelCase ):
a__ = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def lowerCamelCase_ ( self) -> float:
'''simple docstring'''
return 1e-4
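# A minimal export sketch (assumes the legacy `transformers.onnx` export API; the
# config class above is called DeiTOnnxConfig upstream):
#     from pathlib import Path
#     from transformers import AutoImageProcessor, DeiTModel
#     from transformers.onnx import export
#     model = DeiTModel.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     onnx_config = DeiTOnnxConfig(model.config)
#     export(processor, model, onnx_config, onnx_config.default_onnx_opset, Path("deit.onnx"))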
| 217
| 1
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
__a = nn.Parameter(lowerCAmelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
__a = nn.Parameter(lowerCAmelCase__ )
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# set torch weights for 1-to-1 comparison
__a = np.asarray(weights[0] )
__a = np.asarray(weights[1] )
__a = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCAmelCase__ ).view(-1 , lowerCAmelCase__ ).contiguous().transpose(0 , 1 ) , )
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# set torch weights for 1-to-1 comparison
__a = np.asarray(weights[0] )
__a = np.asarray(weights[1] )
__a = np.asarray(weights[2] )
__a = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCAmelCase__ ).view(-1 , lowerCAmelCase__ ).contiguous().transpose(0 , 1 ) , )
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# layernorm 1
__a = weights[0][0][0]
__a = np.asarray(layer_norm_a[0] )
__a = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCAmelCase__ ) , torch.tensor(lowerCAmelCase__ ) , )
# lsh weights + output
__a = weights[0][1]
if len(lowerCAmelCase__ ) < 4:
set_layer_weights_in_torch_lsh(lowerCAmelCase__ , torch_block.attention , lowerCAmelCase__ )
else:
set_layer_weights_in_torch_local(lowerCAmelCase__ , torch_block.attention , lowerCAmelCase__ )
    # intermediate weights
__a = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCAmelCase__ ) == 4:
__a = intermediate_weights[2]
# layernorm 2
__a = np.asarray(intermediate_weights[0][0] )
__a = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCAmelCase__ ) , torch.tensor(lowerCAmelCase__ ) , )
# intermediate dense
__a = np.asarray(intermediate_weights[1][0] )
__a = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase__ ) , )
# intermediate out
__a = np.asarray(intermediate_weights[4][0] )
__a = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase__ ) , )
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# reformer model
__a = torch_model.reformer
# word embeds
__a = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCAmelCase__ ) , )
if isinstance(weights[3] , lowerCAmelCase__ ):
__a = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__a = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
__a = nn.Parameter(torch.tensor(lowerCAmelCase__ ) )
__a = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCAmelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__a = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# output layer norm
__a = np.asarray(weights[7][0] )
__a = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCAmelCase__ ) , torch.tensor(lowerCAmelCase__ ) , )
# output embeddings
__a = np.asarray(weights[9][0] )
__a = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase__ ) , )
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# Initialise PyTorch model
__a = ReformerConfig.from_json_file(lowerCAmelCase__ )
print(f'''Building PyTorch model from configuration: {config}''' )
__a = ReformerModelWithLMHead(lowerCAmelCase__ )
with open(lowerCAmelCase__ , """rb""" ) as f:
__a = pickle.load(lowerCAmelCase__ )["""weights"""]
set_model_weights_in_torch(lowerCAmelCase__ , lowerCAmelCase__ , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the trax model pickle file.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
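# Example invocation (script name and paths are placeholders):
#     python convert_reformer_trax_checkpoint_to_pytorch.py --trax_model_pkl_path ./model.pkl \
#         --config_file ./config.json --pytorch_dump_path ./pytorch_model.bin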
| 99
|
'''simple docstring'''
def _A ( number_of_steps ):
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
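# Quick sanity check (steps of size 1 or 2): 3 stairs can be climbed in 3 distinct
# ways (1+1+1, 1+2, 2+1), so the sequence reduces to the Fibonacci numbers:
#     assert _A(3) == 3 and _A(4) == 5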
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
| 0
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =42
class _UpperCamelCase ( __A , __A ):
'''simple docstring'''
@register_to_config
def __init__( self : str , a : int = 16 , a : int = 88 , a : Optional[int] = None , a : Optional[int] = None , a : int = 1 , a : float = 0.0 , a : int = 32 , a : Optional[int] = None , a : bool = False , a : Optional[int] = None , a : str = "geglu" , a : bool = True , a : bool = True , ) -> str:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : str = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = attention_head_dim
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads * attention_head_dim
SCREAMING_SNAKE_CASE : Optional[Any] = in_channels
SCREAMING_SNAKE_CASE : List[Any] = torch.nn.GroupNorm(num_groups=a , num_channels=a , eps=1e-6 , affine=a )
SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(a , a )
# 3. Define transformers blocks
SCREAMING_SNAKE_CASE : List[str] = nn.ModuleList(
[
BasicTransformerBlock(
a , a , a , dropout=a , cross_attention_dim=a , activation_fn=a , attention_bias=a , double_self_attention=a , norm_elementwise_affine=a , )
for d in range(a )
] )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Linear(a , a )
def __UpperCamelCase ( self : Any , a : int , a : Optional[int]=None , a : List[str]=None , a : List[str]=None , a : Union[str, Any]=1 , a : Any=None , a : bool = True , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = hidden_states.shape
SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states[None, :].reshape(a , a , a , a , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
SCREAMING_SNAKE_CASE : Tuple = self.norm(a )
SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , a , a )
SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(a )
# 2. Blocks
for block in self.transformer_blocks:
SCREAMING_SNAKE_CASE : Optional[int] = block(
a , encoder_hidden_states=a , timestep=a , cross_attention_kwargs=a , class_labels=a , )
# 3. Output
SCREAMING_SNAKE_CASE : List[str] = self.proj_out(a )
SCREAMING_SNAKE_CASE : int = (
hidden_states[None, None, :]
.reshape(a , a , a , a , a )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
SCREAMING_SNAKE_CASE : Dict = hidden_states.reshape(a , a , a , a )
SCREAMING_SNAKE_CASE : int = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=a )
| 193
|
from __future__ import annotations
import math
import random
from typing import Any
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : list[Any] = []
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : int = 0
def __UpperCamelCase ( self : List[Any] ) -> bool:
"""simple docstring"""
return self.head == self.tail
def __UpperCamelCase ( self : Optional[int] , a : Any ) -> None:
"""simple docstring"""
self.data.append(a )
SCREAMING_SNAKE_CASE : List[str] = self.tail + 1
def __UpperCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.data[self.head]
SCREAMING_SNAKE_CASE : List[str] = self.head + 1
return ret
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
return self.tail - self.head
def __UpperCamelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
print(self.data )
print("**************" )
print(self.data[self.head : self.tail] )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , a : Any ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = data
SCREAMING_SNAKE_CASE : MyNode | None = None
SCREAMING_SNAKE_CASE : MyNode | None = None
SCREAMING_SNAKE_CASE : int = 1
def __UpperCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
return self.data
def __UpperCamelCase ( self : Optional[int] ) -> MyNode | None:
"""simple docstring"""
return self.left
def __UpperCamelCase ( self : Union[str, Any] ) -> MyNode | None:
"""simple docstring"""
return self.right
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self.height
def __UpperCamelCase ( self : Union[str, Any] , a : Any ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = data
def __UpperCamelCase ( self : Any , a : MyNode | None ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = node
def __UpperCamelCase ( self : List[str] , a : MyNode | None ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = node
def __UpperCamelCase ( self : Optional[int] , a : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = height
def lowerCamelCase__ ( _a):
if node is None:
return 0
return node.get_height()
def lowerCamelCase__ ( _a , _a):
if a > b:
return a
return b
def lowerCamelCase__ ( _a):
print("left rotation node:" , node.get_data())
SCREAMING_SNAKE_CASE : List[str] = node.get_left()
assert ret is not None
node.set_left(ret.get_right())
ret.set_right(_a)
SCREAMING_SNAKE_CASE : List[Any] = my_max(get_height(node.get_right()) , get_height(node.get_left())) + 1
node.set_height(_a)
SCREAMING_SNAKE_CASE : Optional[int] = my_max(get_height(ret.get_right()) , get_height(ret.get_left())) + 1
ret.set_height(_a)
return ret
def lowerCamelCase__ ( _a):
print("right rotation node:" , node.get_data())
SCREAMING_SNAKE_CASE : Dict = node.get_right()
assert ret is not None
node.set_right(ret.get_left())
ret.set_left(_a)
SCREAMING_SNAKE_CASE : Union[str, Any] = my_max(get_height(node.get_right()) , get_height(node.get_left())) + 1
node.set_height(_a)
SCREAMING_SNAKE_CASE : Tuple = my_max(get_height(ret.get_right()) , get_height(ret.get_left())) + 1
ret.set_height(_a)
return ret
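# The two compound rotations below handle the zig-zag cases: an LR rotation first
# left-rotates the left child (turning a left-right imbalance into left-left) and
# then right-rotates the node; an RL rotation is the mirror image.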
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : List[Any] = node.get_left()
assert left_child is not None
node.set_left(left_rotation(_a))
return right_rotation(_a)
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : int = node.get_right()
assert right_child is not None
node.set_right(right_rotation(_a))
return left_rotation(_a)
def lowerCamelCase__ ( _a , _a):
if node is None:
return MyNode(_a)
if data < node.get_data():
node.set_left(insert_node(node.get_left() , _a))
if (
get_height(node.get_left()) - get_height(node.get_right()) == 2
        ): # an imbalance detected
SCREAMING_SNAKE_CASE : List[Any] = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE : Tuple = right_rotation(_a)
else:
SCREAMING_SNAKE_CASE : int = lr_rotation(_a)
else:
node.set_right(insert_node(node.get_right() , _a))
if get_height(node.get_right()) - get_height(node.get_left()) == 2:
SCREAMING_SNAKE_CASE : Any = node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE : Union[str, Any] = rl_rotation(_a)
else:
SCREAMING_SNAKE_CASE : int = left_rotation(_a)
SCREAMING_SNAKE_CASE : str = my_max(get_height(node.get_right()) , get_height(node.get_left())) + 1
node.set_height(_a)
return node
def lowerCamelCase__ ( _a):
while True:
SCREAMING_SNAKE_CASE : List[Any] = root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE : str = right_child
return root.get_data()
def lowerCamelCase__ ( _a):
while True:
SCREAMING_SNAKE_CASE : Optional[int] = root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE : List[str] = left_child
return root.get_data()
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : Any = root.get_left()
SCREAMING_SNAKE_CASE : List[Any] = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE : Any = get_left_most(_a)
root.set_data(_a)
root.set_right(del_node(_a , _a))
elif left_child is not None:
SCREAMING_SNAKE_CASE : Dict = left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE : str = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("No such data")
return root
else:
root.set_left(del_node(_a , _a))
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(_a , _a))
if get_height(_a) - get_height(_a) == 2:
assert right_child is not None
if get_height(right_child.get_right()) > get_height(right_child.get_left()):
SCREAMING_SNAKE_CASE : List[str] = left_rotation(_a)
else:
SCREAMING_SNAKE_CASE : int = rl_rotation(_a)
elif get_height(_a) - get_height(_a) == -2:
assert left_child is not None
if get_height(left_child.get_left()) > get_height(left_child.get_right()):
SCREAMING_SNAKE_CASE : str = right_rotation(_a)
else:
SCREAMING_SNAKE_CASE : Optional[Any] = lr_rotation(_a)
SCREAMING_SNAKE_CASE : List[str] = my_max(get_height(root.get_right()) , get_height(root.get_left())) + 1
root.set_height(_a)
return root
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : str ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : MyNode | None = None
def __UpperCamelCase ( self : Any ) -> int:
"""simple docstring"""
return get_height(self.root )
def __UpperCamelCase ( self : List[Any] , a : Any ) -> None:
"""simple docstring"""
print("insert:" + str(a ) )
SCREAMING_SNAKE_CASE : Any = insert_node(self.root , a )
def __UpperCamelCase ( self : List[Any] , a : Any ) -> None:
"""simple docstring"""
print("delete:" + str(a ) )
if self.root is None:
print("Tree is empty!" )
return
SCREAMING_SNAKE_CASE : Optional[int] = del_node(self.root , a )
    def __str__( self : Optional[int] , ) -> str: # a level-order traversal gives a more intuitive look at the tree
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = ""
SCREAMING_SNAKE_CASE : Optional[int] = MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE : Any = self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE : Dict = 0
while not q.is_empty():
SCREAMING_SNAKE_CASE : Dict = q.pop()
SCREAMING_SNAKE_CASE : List[Any] = " " * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(a )
q.push(a )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE : List[str] = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , a ) - 1:
SCREAMING_SNAKE_CASE : List[str] = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def _test():
    import doctest
    doctest.testmod()
if __name__ == "__main__":
_test()
a_ = AVLtree()
a_ = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 193
| 1
|
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( __snake_case ) -> Dict:
"""simple docstring"""
return EnvironmentCommand()
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class _UpperCAmelCase( lowerCamelCase ):
@staticmethod
def UpperCAmelCase ( __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = parser.add_parser('''env''')
download_parser.set_defaults(func=__a)
download_parser.add_argument(
'''--accelerate-config_file''' , default=__a , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=__a)
def __init__( self , __a , *__a) -> None:
'''simple docstring'''
_UpperCamelCase = accelerate_config_file
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = '''not installed'''
if is_safetensors_available():
import safetensors
_UpperCamelCase = safetensors.__version__
elif importlib.util.find_spec('''safetensors''') is not None:
import safetensors
_UpperCamelCase = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
_UpperCamelCase = '''not installed'''
_UpperCamelCase = _UpperCamelCase = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_UpperCamelCase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(__a):
_UpperCamelCase = load_config_from_file(self._accelerate_config_file).to_dict()
_UpperCamelCase = (
'''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
if isinstance(__a , __a)
else F'''\t{accelerate_config}'''
)
_UpperCamelCase = '''not installed'''
_UpperCamelCase = '''NA'''
if is_torch_available():
import torch
_UpperCamelCase = torch.__version__
_UpperCamelCase = torch.cuda.is_available()
_UpperCamelCase = '''not installed'''
_UpperCamelCase = '''NA'''
if is_tf_available():
import tensorflow as tf
_UpperCamelCase = tf.__version__
try:
# deprecated in v2.1
_UpperCamelCase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_UpperCamelCase = bool(tf.config.list_physical_devices('''GPU'''))
_UpperCamelCase = '''not installed'''
_UpperCamelCase = '''not installed'''
_UpperCamelCase = '''not installed'''
_UpperCamelCase = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
_UpperCamelCase = flax.__version__
_UpperCamelCase = jax.__version__
_UpperCamelCase = jaxlib.__version__
_UpperCamelCase = jax.lib.xla_bridge.get_backend().platform
_UpperCamelCase = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F'''{safetensors_version}''',
'''Accelerate version''': F'''{accelerate_version}''',
'''Accelerate config''': F'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''',
'''Jax version''': F'''{jax_version}''',
'''JaxLib version''': F'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''')
print(self.format_dict(__a))
return info
@staticmethod
def UpperCAmelCase ( __a) -> Union[str, Any]:
'''simple docstring'''
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()]) + "\n"
| 19
|
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = (DPMSolverSDEScheduler,)
lowercase__ = 10
def UpperCAmelCase ( self , **__a) -> int:
'''simple docstring'''
_UpperCamelCase = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__a)
return config
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=__a , beta_end=__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''')
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a , use_karras_sigmas=__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
| 19
| 1
|
import math
def is_prime(number):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth=10001):
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 700
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class _UpperCamelCase :
'''simple docstring'''
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None # sigma(t_i)
@classmethod
def __UpperCamelCase ( cls : Tuple ) -> int:
"""simple docstring"""
return cls()
@dataclass
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =42
class _UpperCamelCase ( __A , __A ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return True
@register_to_config
def __init__( self : List[str] , a : float = 0.02 , a : float = 100 , a : float = 1.007 , a : float = 80 , a : float = 0.05 , a : float = 50 , ) -> Optional[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
return KarrasVeSchedulerState.create()
def __UpperCamelCase ( self : Tuple , a : KarrasVeSchedulerState , a : int , a : Tuple = () ) -> KarrasVeSchedulerState:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = jnp.arange(0 , a )[::-1].copy()
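        # Karras et al. (2022)-style schedule: a geometric interpolation from
        # sigma_max**2 (at i = 0) down to sigma_min**2 (at i = num_inference_steps - 1).
        # Note the stored entries are squared sigmas, indexed by timestep.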
SCREAMING_SNAKE_CASE : int = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=a , schedule=jnp.array(a , dtype=jnp.floataa ) , timesteps=a , )
def __UpperCamelCase ( self : str , a : KarrasVeSchedulerState , a : jnp.ndarray , a : float , a : random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
SCREAMING_SNAKE_CASE : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
SCREAMING_SNAKE_CASE : Any = 0
# sample eps ~ N(0, S_noise^2 * I)
SCREAMING_SNAKE_CASE : Any = random.split(a , num=1 )
SCREAMING_SNAKE_CASE : List[str] = self.config.s_noise * random.normal(key=a , shape=sample.shape )
SCREAMING_SNAKE_CASE : Optional[int] = sigma + gamma * sigma
SCREAMING_SNAKE_CASE : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __UpperCamelCase ( self : Union[str, Any] , a : KarrasVeSchedulerState , a : jnp.ndarray , a : float , a : float , a : jnp.ndarray , a : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = sample_hat + sigma_hat * model_output
SCREAMING_SNAKE_CASE : Optional[Any] = (sample_hat - pred_original_sample) / sigma_hat
SCREAMING_SNAKE_CASE : str = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a , derivative=a , state=a )
def __UpperCamelCase ( self : Optional[int] , a : KarrasVeSchedulerState , a : jnp.ndarray , a : float , a : float , a : jnp.ndarray , a : jnp.ndarray , a : jnp.ndarray , a : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = sample_prev + sigma_prev * model_output
SCREAMING_SNAKE_CASE : Union[str, Any] = (sample_prev - pred_original_sample) / sigma_prev
SCREAMING_SNAKE_CASE : List[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a , derivative=a , state=a )
def __UpperCamelCase ( self : int , a : KarrasVeSchedulerState , a : Optional[Any] , a : int , a : Dict ) -> List[str]:
"""simple docstring"""
raise NotImplementedError()
| 193
| 0
|
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encodes `data` according to RFC 4648."""
    if not isinstance(data, bytes):
        raise TypeError(f"a bytes-like object is required, not '{data.__class__.__name__}'")
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decodes `encoded_data` according to RFC 4648."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        raise TypeError(
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
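# A quick round-trip sanity check against the standard library:
#     import base64
#     assert base64_encode(b"Algorithms") == base64.b64encode(b"Algorithms")
#     assert base64_decode("QWxnb3JpdGhtcw==") == b"Algorithms"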
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a :Dict = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple=False ):
"""simple docstring"""
A_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A_ = ""
else:
A_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ = in_proj_weight[
: config.hidden_size, :
]
A_ = in_proj_bias[: config.hidden_size]
A_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ = in_proj_weight[
-config.hidden_size :, :
]
A_ = in_proj_bias[-config.hidden_size :]
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = dct.pop(__UpperCamelCase )
A_ = val
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = ViTConfig()
A_ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A_ = True
A_ = int(vit_name[-12:-10] )
A_ = int(vit_name[-9:-6] )
else:
A_ = 1000
A_ = "huggingface/label-files"
A_ = "imagenet-1k-id2label.json"
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
A_ = int(vit_name[-6:-4] )
A_ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
A_ = 192
A_ = 768
A_ = 12
A_ = 3
elif vit_name[9:].startswith("small" ):
A_ = 384
A_ = 1536
A_ = 12
A_ = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
A_ = 768
A_ = 2304
A_ = 8
A_ = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
A_ = 1024
A_ = 4096
A_ = 24
A_ = 16
elif vit_name[4:].startswith("huge" ):
A_ = 1280
A_ = 5120
A_ = 32
A_ = 16
# load original model from timm
A_ = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ = timm_model.state_dict()
if base_model:
remove_classification_head_(__UpperCamelCase )
A_ = create_rename_keys(__UpperCamelCase ,__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ = ViTModel(__UpperCamelCase ).eval()
else:
A_ = ViTForImageClassification(__UpperCamelCase ).eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A_ = DeiTImageProcessor(size=config.image_size )
else:
A_ = ViTImageProcessor(size=config.image_size )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" )
A_ = encoding["pixel_values"]
A_ = model(__UpperCamelCase )
if base_model:
A_ = timm_model.forward_features(__UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__UpperCamelCase ,outputs.pooler_output ,atol=1E-3 )
else:
A_ = timm_model(__UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a :Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
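# Example invocation (script name and output path are placeholders):
#     python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#         --pytorch_dump_folder_path ./vit-base-patch16-224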
| 86
| 0
|
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
            s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weihts[idx]
            print(f"""{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}""")
s_dict.pop(UpperCAmelCase__ )
return s_dict
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a google style gin config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
    A_ = checkpoints.load_t5x_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
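# Example invocation (script name and paths are placeholders):
#     python convert_switch_transformers_flax_to_pytorch.py \
#         --switch_t5x_checkpoint_path ./checkpoint --gin_file ./config.gin \
#         --pytorch_dump_folder_path ./switch-base-8 --num_experts 8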
| 715
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
A_ = json.load(UpperCAmelCase__ )
A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""]
# Load the entity vocab file
A_ = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
A_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("""<ent>""", lstrip=False, rstrip=False )
    entity_token_2 = AddedToken("""<ent2>""", lstrip=False, rstrip=False )
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_1, entity_token_2]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = """MLukeTokenizer"""
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
A_ = state_dict["""embeddings.word_embeddings.weight"""]
A_ = word_emb[ent_init_index].unsqueeze(0 )
A_ = word_emb[enta_init_index].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ = state_dict[bias_name]
A_ = decoder_bias[ent_init_index].unsqueeze(0 )
A_ = decoder_bias[enta_init_index].unsqueeze(0 )
A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = F'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A_ = state_dict["""entity_predictions.bias"""]
A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
A_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
A_ = state_dict[key]
else:
A_ = state_dict[key]
A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" )
A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ = (0, 9)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 33, 7_68) )
A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 1, 7_68) )
A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            full_entity_name = f"{language}:{entity_name}"
            new_mapping[full_entity_name] = entity_id
    return new_mapping
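# Illustration only (hypothetical data): the loader above expects one JSON
# object per line, e.g.
#   {"id": 4, "entities": [["Japan", "en"], ["日本", "ja"]]}
# which maps to {"en:Japan": 4, "ja:日本": 4}; special tokens such as "[MASK]"
# keep their bare name.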
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
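# Hedged usage sketch -- the script name and paths below are placeholders, not
# verified files:
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base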
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
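# Shape illustration (hypothetical helper, not used by the loader): Flax stores
# convolution kernels as (H, W, C_in, C_out) while PyTorch expects
# (C_out, C_in, H, W), which is why the loop above applies a (3, 2, 0, 1)
# transpose to 4-dimensional "kernel" entries.
def _example_kernel_transpose():
    kernel = np.zeros((3, 3, 16, 32))  # H, W, C_in, C_out (Flax layout)
    transposed = np.transpose(kernel, (3, 2, 0, 1))
    assert transposed.shape == (32, 16, 3, 3)  # C_out, C_in, H, W (PyTorch layout)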
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` has no divisor in [2, isqrt(number)]."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes of the form (k + 1)**3 - k**3 below ``max_prime``."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
def gnome_sort(lst: list) -> list:
    """Sort ``lst`` in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
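# Example (illustrative): gnome_sort([5, 3, 4, 1]) returns [1, 3, 4, 5]. After
# every swap the index steps back by one, so everything left of `i` stays
# sorted at all times.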
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
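# Hedged usage sketch of the helpers exercised above (application code, not a
# test):
#   from transformers.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger(__name__)
#   logger.info("visible at INFO level")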
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor, a tokenizer and a Q-Former tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
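# Hedged usage sketch (checkpoint id and inputs are illustrative, not
# verified here):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
# `inputs` then carries pixel_values, input_ids/attention_mask for the language
# model, and qformer_input_ids/qformer_attention_mask for the Q-Former.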
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
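# Illustration: any attempt to use the placeholder fails fast, e.g.
#   MidiProcessor()  # raises, asking to `pip install note_seq`
# so the package import itself still succeeds when the optional dependency is
# missing.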
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
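# Minimal round-trip sketch mirroring the tests above (paths illustrative):
#   ds = SqlDatasetReader("dataset", "sqlite:///in.db").read()
#   SqlDatasetWriter(ds, "dataset", "sqlite:///out.db", num_proc=1).write()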
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` (given as a string) via the modified Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64)
        scheduler = EulerDiscreteScheduler(beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading")
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape step of c = x + y*i, in [0, 1]."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
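# Quick property check (illustrative): the origin belongs to the Mandelbrot
# set, so it never diverges and its normalized "distance" is exactly 1:
#   get_distance(0, 0, 50) == 1  # -> colored black by both coloring functions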
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of ``nums``; raise ValueError if empty."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
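# Example (illustrative): mean([1, 2, 3, 4]) == 2.5, while mean([]) raises
# ValueError instead of dividing by zero.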
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
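# Illustration: with the _LazyModule in place, submodule imports are deferred
# until first attribute access, e.g.
#   from transformers.models.data2vec import Data2VecTextConfig
# only imports configuration_data2vec_text at that point, keeping the initial
# package import cheap.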
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_lowerCamelCase : List[Any] = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an ErnieM model."""

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
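# Hedged usage sketch (the values shown are simply the defaults above):
#   config = ErnieMConfig(vocab_size=250002, hidden_size=768)
#   assert config.num_hidden_layers == 12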
from __future__ import annotations

from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum amongst all non-empty subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
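# Worked example: max_subsequence_sum([1, -2, 3, 4]) == 8. Because a
# *subsequence* may skip elements, the optimum collects the positive entries
# 1 + 3 + 4; a negative entry only wins when all values are negative, in which
# case the largest single element is returned.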
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a small LoRA-style adapter; used in the training test below."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_wrong_arguments(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype``
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb
        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
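# Hedged sketch of the loading path these tests exercise (requires a CUDA GPU
# with `bitsandbytes` and `accelerate` installed):
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-1b7", load_in_4bit=True, device_map="auto"
#   )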
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
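# Worked example (hypothetical helper): the system 2x + 3y = 8 and x - y = -1
# has determinant 2*(-1) - 1*3 = -5, so x = -5/-5 = 1 and y = -10/-5 = 2.
def _example_cramers_rule() -> None:
    assert cramers_rule_2x2([2, 3, 8], [1, -1, -1]) == (1.0, 2.0)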
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = tf.concat(
[tf.zeros_like(__lowerCamelCase )[:, :-1], tf.ones_like(__lowerCamelCase )[:, -1:]] , axis=-1 , )
SCREAMING_SNAKE_CASE = global_attention_mask
return config, inputs_dict
def _snake_case ( self : int , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = TFLEDModel(config=__lowerCamelCase ).get_decoder()
SCREAMING_SNAKE_CASE = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE = input_ids[:1, :]
SCREAMING_SNAKE_CASE = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE = 1
# first forward pass
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1e-3 )
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowerCamelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowerCamelCase__ = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = TFLEDModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase )
def _snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = tf.zeros_like(inputs_dict["attention_mask"] )
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.model_tester.seq_length
SCREAMING_SNAKE_CASE = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_attentions]
SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _snake_case ( self : List[Any] ):
pass
def _snake_case ( self : List[str] ):
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int64)
__A : Optional[int] = 1e-4
@slow
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
SCREAMING_SNAKE_CASE = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
SCREAMING_SNAKE_CASE = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )[0]
SCREAMING_SNAKE_CASE = (1, 1024, 768)
self.assertEqual(output.shape , __lowerCamelCase )
# change to expected output here
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-3 )
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
SCREAMING_SNAKE_CASE = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
SCREAMING_SNAKE_CASE = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )[0]
SCREAMING_SNAKE_CASE = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __lowerCamelCase )
# change to expected output here
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-3 , rtol=1e-3 )
| 16
|
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices of the two values in a sorted list that sum to target."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
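# Worked trace (illustrative): for nums=[2, 7, 11, 15] and target=9, the pointers
# start at i=0, j=3; 2 + 15 = 17 > 9 moves j to 2; 2 + 11 = 13 > 9 moves j to 1;
# 2 + 7 = 9 returns [0, 1]. Note the list must already be sorted for the
# pointer-narrowing argument to hold.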
| 330
| 0
|
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch subdivision step `steps` times to the vector list."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every line segment with the four segments of the Koch curve."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle in degrees."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the Koch snowflake with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
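# Worked example for the `rotate` helper above (illustrative values): rotating the
# unit x-vector by 90 degrees counterclockwise gives approximately the unit
# y-vector, rotate(numpy.array([1, 0]), 90) ~ array([0.0, 1.0]), since the matrix
# ((cos t, -sin t), (sin t, cos t)) maps (1, 0) to (cos t, sin t).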
| 201
|
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Difference between the hypothesis output and the actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set)
def _hypothesis_value(data_input_tuple):
    """Hypothesis h(x) = theta_0 + sum_i theta_(i+1) * x_i."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Actual output value for the given example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the given example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum the error terms, weighted by the feature at `index` unless index == -1."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    """Partial derivative of the cost with respect to one parameter."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
    run_gradient_descent()
    print('\nTesting gradient descent for a linear hypothesis function.\n')
    test_gradient_descent()
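# Restated in standard notation (no extra functionality): each step above updates
#     theta_i := theta_i - LEARNING_RATE * (1/m) * sum_j (h(x_j) - y_j) * x_j_i
# where x_j_0 is taken to be 1, so index -1 in get_cost_derivative corresponds to
# the bias term theta_0.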
| 201
| 1
|
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data", )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file", )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file", )
    args = parser.parse_args()
    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w") as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
    main()
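# Based only on the fields accessed above, each DPR record is assumed to look
# roughly like {"question": "...", "positive_ctxs": [{"title": "..."}, ...]},
# so the script writes one question per line and a tab-joined list of gold titles.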
| 386
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
def _lowercase ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[int, Iterable[int]] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
def constraint_to_multiple_of(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int=0 , SCREAMING_SNAKE_CASE_ : List[Any]=None ):
UpperCamelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
UpperCamelCase = math.floor(val / multiple ) * multiple
if x < min_val:
UpperCamelCase = math.ceil(val / multiple ) * multiple
return x
UpperCamelCase = (output_size, output_size) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else output_size
UpperCamelCase , UpperCamelCase = get_image_size(SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase = output_size
# determine new height and width
UpperCamelCase = output_height / input_height
UpperCamelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
UpperCamelCase = scale_width
else:
# fit height
UpperCamelCase = scale_height
UpperCamelCase = constraint_to_multiple_of(scale_height * input_height , multiple=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = constraint_to_multiple_of(scale_width * input_width , multiple=SCREAMING_SNAKE_CASE_ )
return (new_height, new_width)
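# Worked example (illustrative numbers): for an input of (height=480, width=640),
# output_size=(384, 384), keep_aspect_ratio=True and multiple=32, the height scale
# 384/480 = 0.8 is closer to 1 than the width scale 384/640 = 0.6, so both
# dimensions use 0.8 and the result is (384, 512) after rounding to multiples of 32.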
class UpperCAmelCase ( __snake_case ):
lowercase = ["""pixel_values"""]
def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ : bool = False , __magic_name__ : int = 1 , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 2_5_5 , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , **__magic_name__ : Tuple , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
UpperCamelCase = size if size is not None else {"""height""": 3_8_4, """width""": 3_8_4}
UpperCamelCase = get_size_dict(__magic_name__ )
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = keep_aspect_ratio
UpperCamelCase = ensure_multiple_of
UpperCamelCase = resample
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_normalize
UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase_ ( self : List[Any] , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : bool = False , __magic_name__ : int = 1 , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : int , ):
"""simple docstring"""
UpperCamelCase = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
UpperCamelCase = get_resize_output_image_size(
__magic_name__ , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=__magic_name__ , multiple=__magic_name__ , )
return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowerCamelCase_ ( self : Any , __magic_name__ : np.ndarray , __magic_name__ : Union[int, float] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : str , ):
"""simple docstring"""
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowerCamelCase_ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Dict , ):
"""simple docstring"""
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowerCamelCase_ ( self : List[str] , __magic_name__ : ImageInput , __magic_name__ : bool = None , __magic_name__ : int = None , __magic_name__ : bool = None , __magic_name__ : int = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : float = None , __magic_name__ : bool = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : int , ):
"""simple docstring"""
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(__magic_name__ )
UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = image_mean if image_mean is not None else self.image_mean
UpperCamelCase = image_std if image_std is not None else self.image_std
UpperCamelCase = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
UpperCamelCase = [self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
UpperCamelCase = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
UpperCamelCase = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
def lowerCamelCase_ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : List[Tuple] = None ):
"""simple docstring"""
UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__magic_name__ ):
UpperCamelCase = target_sizes.numpy()
UpperCamelCase = []
for idx in range(len(__magic_name__ ) ):
UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__magic_name__ )
UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__magic_name__ )
else:
UpperCamelCase = logits.argmax(dim=1 )
UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 386
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    '''Collect the (old_key, new_key) pairs used to rename checkpoint weights.'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    '''Split each fused qkv projection into separate query, key and value weights.'''
    for i in range(config.num_hidden_layers ):
        prefix = 'vilt.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
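# In other words: the fused qkv weight has shape (3 * hidden_size, hidden_size),
# and the rows [0, h), [h, 2h) and [2h, 3h) hold the query, key and value
# projections respectively; the fused bias is split the same way.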
def lowerCamelCase__ ( state_dict ):
    '''Drop pretraining head weights that the converted model does not use.'''
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct, old, new):
    '''Move the value stored under `old` to the key `new` in the given dict.'''
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''Convert a checkpoint from the original ViLT repository to the HF format.'''
UpperCAmelCase_ : List[Any] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Tuple = False
if "vqa" in checkpoint_url:
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : int = 3129
UpperCAmelCase_ : Union[str, Any] = '''huggingface/label-files'''
UpperCAmelCase_ : Any = '''vqa2-id2label.json'''
UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ : List[str] = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Tuple = idalabel
UpperCAmelCase_ : Dict = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : int = ViltForQuestionAnswering(SCREAMING_SNAKE_CASE_ )
elif "nlvr" in checkpoint_url:
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : str = 2
UpperCAmelCase_ : List[str] = {0: '''False''', 1: '''True'''}
UpperCAmelCase_ : str = {v: k for k, v in config.idalabel.items()}
UpperCAmelCase_ : Any = 3
UpperCAmelCase_ : List[str] = ViltForImagesAndTextClassification(SCREAMING_SNAKE_CASE_ )
elif "irtr" in checkpoint_url:
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : Optional[Any] = ViltForImageAndTextRetrieval(SCREAMING_SNAKE_CASE_ )
elif "mlm_itm" in checkpoint_url:
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : str = ViltForMaskedLM(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ : int = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''state_dict''']
UpperCAmelCase_ : List[str] = create_rename_keys(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if mlm_model or irtr_model:
UpperCAmelCase_ : Optional[int] = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Define processor
UpperCAmelCase_ : Any = ViltImageProcessor(size=384 )
UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
UpperCAmelCase_ : Optional[int] = ViltProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Forward pass on example inputs (image + text)
if nlvr_model:
UpperCAmelCase_ : Dict = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=SCREAMING_SNAKE_CASE_ ).raw )
UpperCAmelCase_ : List[str] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=SCREAMING_SNAKE_CASE_ ).raw )
UpperCAmelCase_ : List[Any] = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
UpperCAmelCase_ : Any = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
UpperCAmelCase_ : List[str] = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
UpperCAmelCase_ : Optional[Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
UpperCAmelCase_ : str = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=SCREAMING_SNAKE_CASE_ ).raw )
if mlm_model:
UpperCAmelCase_ : Dict = '''a bunch of [MASK] laying on a [MASK].'''
else:
UpperCAmelCase_ : Dict = '''How many cats are there?'''
UpperCAmelCase_ : List[Any] = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
UpperCAmelCase_ : Optional[Any] = model(**SCREAMING_SNAKE_CASE_ )
# Verify outputs
if mlm_model:
UpperCAmelCase_ : List[str] = torch.Size([1, 11, 30522] )
UpperCAmelCase_ : Any = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
# verify masked token prediction equals "cats"
UpperCAmelCase_ : str = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
UpperCAmelCase_ : Union[str, Any] = torch.Size([1, 3129] )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
# verify vqa prediction equals "2"
UpperCAmelCase_ : Any = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
UpperCAmelCase_ : Optional[int] = torch.Size([1, 2] )
UpperCAmelCase_ : Optional[int] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 700
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,) -> Tuple:
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : Optional[Any] = 13
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Dict = 2
UpperCAmelCase_ : Tuple = 99
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Optional[int] = 32
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : List[Any] = 0.1
UpperCAmelCase_ : int = 0.1
UpperCAmelCase_ : List[str] = 512
UpperCAmelCase_ : Any = 16
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Any = 0.02
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : List[Any] = 4
UpperCAmelCase_ : Dict = '''last'''
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Union[str, Any] = 0
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa )
UpperCAmelCase_ : Optional[Any] = None
if self.use_input_lengths:
UpperCAmelCase_ : Optional[int] = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Any = None
if self.use_labels:
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : int = FlaubertConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Any:
UpperCAmelCase_ : Tuple = TFFlaubertModel(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = [input_ids, input_mask]
UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str:
UpperCAmelCase_ : int = TFFlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Tuple:
UpperCAmelCase_ : List[Any] = TFFlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> int:
UpperCAmelCase_ : List[Any] = TFFlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Optional[Any]:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : List[str] = TFFlaubertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str:
UpperCAmelCase_ : List[Any] = self.num_choices
UpperCAmelCase_ : Any = TFFlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase_ : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase_ : str = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase_ : Dict = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
),
) : Any = config_and_inputs
UpperCAmelCase_ : Tuple = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class __a( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowerCAmelCase = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a__ ( self ) -> Any:
UpperCAmelCase_ : Optional[int] = TFFlaubertModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,emb_dim=37 )
def a__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def a__ ( self ) -> Any:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = TFFlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_tf
@require_sentencepiece
@require_tokenizers
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[Any] = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
UpperCAmelCase_ : Dict = tf.convert_to_tensor(
[[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.intaa ,) # "J'aime flaubert !"
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase_ : Optional[int] = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape ,_SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
UpperCAmelCase_ : List[Any] = tf.convert_to_tensor(
[
[
[-1.8_76_87_73, -1.56_65_55, 0.27_07_24_18],
[-1.6_92_00_38, -0.5_87_35_05, 1.9_32_95_99],
[-2.9_56_39_85, -1.6_99_38_35, 1.7_97_20_52],
]
] ,dtype=tf.floataa ,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 300
| 0
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class a_ :
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
_lowerCAmelCase : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
_lowerCAmelCase : Dict = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_lowerCAmelCase : Dict = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=snake_case_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[str] = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
_lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=snake_case_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

            for optional_component in pipe._optional_components:
                self.assertTrue(
                    getattr(pipe_loaded, optional_component) is None,
                    f"`{optional_component}` did not stay set to None after loading.",
                )

            inputs = self.get_dummy_inputs(torch_device)
            generator = inputs["generator"]
            num_inference_steps = inputs["num_inference_steps"]
            output_type = inputs["output_type"]

            # inputs with prompt converted to embeddings
            inputs = {
                "prompt_embeds": prompt_embeds,
                "negative_prompt_embeds": negative_prompt_embeds,
                "generator": generator,
                "num_inference_steps": num_inference_steps,
                "output_type": output_type,
            }

            if image is not None:
                inputs["image"] = image

            if mask_image is not None:
                inputs["mask_image"] = mask_image

            if original_image is not None:
                inputs["original_image"] = original_image

            output_loaded = pipe_loaded(**inputs)[0]

            max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
            self.assertLess(max_diff, 1e-4)

    def test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

            inputs = self.get_dummy_inputs(torch_device)
            output_loaded = pipe_loaded(**inputs)[0]

            max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
            self.assertLess(max_diff, 1e-4)
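
    # NOTE: `to_np` used above is the shared helper from diffusers' common
    # pipeline tests; a minimal sketch of the assumed behavior:
    #
    #     def to_np(tensor):
    #         if isinstance(tensor, torch.Tensor):
    #             tensor = tensor.detach().cpu().numpy()
    #         return tensor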
| 384
|
"""Evaluate a polynomial at a point, naively and with Horner's method."""
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Naive evaluation: sum c_i * x**i, computing each power of x independently.

    >>> evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Horner's method: one multiply-add per coefficient, highest degree first.

    >>> horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
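
    # Illustrative self-check (a sketch, not part of the original script):
    # both strategies should agree to floating-point tolerance on
    # arbitrary coefficients.
    import math
    import random

    coeffs = [random.uniform(-1.0, 1.0) for _ in range(10)]
    assert math.isclose(evaluate_poly(coeffs, x), horner(coeffs, x), rel_tol=1e-9, abs_tol=1e-6)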
| 384
| 1
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__lowerCAmelCase : Dict = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def __snake_case ( UpperCamelCase ) -> List[str]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
a__ = k.replace(UpperCamelCase , UpperCamelCase )
return k
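

# Illustrative trace of the renaming rules above (derived from PATTERNS,
# which are applied in list order):
#   "encoder/LayerNorm/gamma"
#     -> "encoder.LayerNorm.gamma"      ("/" -> ".")
#     -> "encoder_layer_norm.weight"    (".LayerNorm.gamma" -> "_layer_norm.weight")
#     -> "encoder.layer_norm.weight"    ("encoder_layer_norm." -> "encoder.layer_norm.")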


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load the converted TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T  # TF stores dense/projection kernels transposed relative to torch.nn.Linear
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def __snake_case ( UpperCamelCase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
"""simple docstring"""
a__ = tf.train.list_variables(UpperCamelCase )
a__ = {}
a__ = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(UpperCamelCase , desc='''converting tf checkpoint to dict''' ):
a__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
a__ = array
return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    """Save the converted tokenizer and model for the dataset inferred from the checkpoint path."""
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowerCAmelCase : Optional[int] = parser.parse_args()
if args.save_dir is None:
__lowerCAmelCase : Optional[int] = Path(args.tf_ckpt_path).parent.name
__lowerCAmelCase : Any = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
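
# Example invocation (a sketch; the script filename and checkpoint path are
# assumptions, the latter mirroring the default in get_tf_weights_as_numpy):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc
#
# The save_dir fallback reuses the checkpoint's parent directory name
# ("aeslc" here) as the dataset name, the same convention used by
# convert_pegasus_ckpt_to_pytorch to select task_specific_params.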
| 158
|
"""simple docstring"""
import math
import qiskit
def __snake_case ( UpperCamelCase = 1 , UpperCamelCase = 1 , UpperCamelCase = 1 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if (
isinstance(UpperCamelCase , UpperCamelCase )
or isinstance(UpperCamelCase , UpperCamelCase )
or isinstance(UpperCamelCase , UpperCamelCase )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(UpperCamelCase ) != input_a)
or (math.floor(UpperCamelCase ) != input_a)
or (math.floor(UpperCamelCase ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
a__ = qiskit.QuantumRegister(4 , '''qr''' )
a__ = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
a__ = [input_a, input_a, carry_in]
a__ = qiskit.QuantumCircuit(UpperCamelCase , UpperCamelCase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(UpperCamelCase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(UpperCamelCase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(UpperCamelCase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , UpperCamelCase ) # measure the last two qbits
a__ = qiskit.Aer.get_backend('''aer_simulator''' )
a__ = qiskit.execute(UpperCamelCase , UpperCamelCase , shots=1_000 )
return job.result().get_counts(UpperCamelCase )
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 158
| 1
|