| code (string, lengths 82–54.1k) | code_codestyle (int64, 0–699) | style_context (string, lengths 111–35.6k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__magic_name__ = get_tests_dir('''fixtures''')
__magic_name__ = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
__magic_name__ = get_tests_dir('''fixtures/dummy-config.json''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> str:
_UpperCAmelCase = 0
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(a_ ).to_dict()
config_dict.pop("feature_extractor_type" )
_UpperCAmelCase = WavaVecaFeatureExtractor(**a_ )
# save in new folder
model_config.save_pretrained(a_ )
config.save_pretrained(a_ )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(a_ )
# make sure private variable is not incorrectly saved
_UpperCAmelCase = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
a_ , "bert-base is not a local folder and is not a valid model identifier" ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained("bert-base" )
def _a ( self ) -> str:
with self.assertRaisesRegex(
a_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(a_ , revision="aaaaaa" )
def _a ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
a_ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def _a ( self ) -> List[Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a_ ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a_ ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=a_ )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=a_ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a_ )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(a_ , trust_remote_code=a_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def _a ( self ) -> Any:
try:
AutoConfig.register("custom" , a_ )
AutoFeatureExtractor.register(a_ , a_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a_ ):
AutoFeatureExtractor.register(a_ , a_ )
# Now that the config is registered, it can be used as any other config with the auto-API
_UpperCAmelCase = CustomFeatureExtractor.from_pretrained(a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a_ )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _a ( self ) -> Optional[Any]:
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = True
try:
AutoConfig.register("custom" , a_ )
AutoFeatureExtractor.register(a_ , a_ )
# If remote code is not set, the default is to use local
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=a_ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=a_ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(a_ , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 657 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
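# Note: _LazyModule installs itself in sys.modules, so `tokenization_tapex` is only
# imported on first attribute access; the TYPE_CHECKING branch keeps static analyzers happy.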
| 657 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
__magic_name__ = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if isinstance(UpperCamelCase__ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase__ , PIL.Image.Image ):
_UpperCAmelCase = [image]
_UpperCAmelCase = [trans(img.convert("RGB" ) ) for img in image]
_UpperCAmelCase = torch.stack(UpperCamelCase__ )
return image
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , a_ , a_ ) -> List[str]:
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=a_ , scheduler=a_ )
def _a ( self , a_ ) -> str:
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}" )
def _a ( self , a_ , a_ , a_ ) -> str:
# get the original timestep using init_timestep
_UpperCAmelCase = min(int(num_inference_steps * strength ) , a_ )
_UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_=None ) -> Optional[Any]:
if not isinstance(a_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a_ )}" )
_UpperCAmelCase = image.to(device=a_ , dtype=a_ )
if isinstance(a_ , a_ ) and len(a_ ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(a_ )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
_UpperCAmelCase = init_latents.shape
_UpperCAmelCase = randn_tensor(a_ , generator=a_ , device=a_ , dtype=a_ )
# get latents
print("add noise to latents at timestep" , a_ )
_UpperCAmelCase = self.scheduler.add_noise(a_ , a_ , a_ )
_UpperCAmelCase = init_latents
return latents
@torch.no_grad()
def __call__( self , a_ = None , a_ = 0.8 , a_ = 1 , a_ = None , a_ = 0.0 , a_ = 50 , a_ = None , a_ = "pil" , a_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(a_ )
# 2. Preprocess image
_UpperCAmelCase = preprocess(a_ )
# 3. set timesteps
self.scheduler.set_timesteps(a_ , device=self.device )
_UpperCAmelCase , _UpperCAmelCase = self.get_timesteps(a_ , a_ , self.device )
_UpperCAmelCase = timesteps[:1].repeat(a_ )
# 4. Prepare latent variables
_UpperCAmelCase = self.prepare_latents(a_ , a_ , a_ , self.unet.dtype , self.device , a_ )
_UpperCAmelCase = latents
# 5. Denoising loop
for t in self.progress_bar(a_ ):
# 1. predict noise model_output
_UpperCAmelCase = self.unet(a_ , a_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(
a_ , a_ , a_ , eta=a_ , use_clipped_model_output=a_ , generator=a_ , ).prev_sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(a_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=a_ )
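# Hypothetical usage sketch (checkpoint name and argument values are assumptions, not taken from this file):
#     pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#     image, timestep = pipe(image=pil_image, strength=0.5, return_dict=False)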
| 657 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
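    # Model tester: builds a tiny LayoutLMv3 config plus random token ids, bounding boxes and pixel
    # values for the model/pipeline tests further down.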
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox coordinates are legal: swap endpoints so x0 <= x1 and y0 <= y1
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
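# Test suite proper: drives the tester above through the shared ModelTesterMixin / PipelineTesterMixin checks.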
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
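    # Integration check: runs microsoft/layoutlmv3-base on the COCO fixture image and compares a slice of the logits.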
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
| 657 | 1 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class _lowerCAmelCase :
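    # Model tester: builds a tiny M2M100 config and matching random encoder/decoder inputs for the suite below.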
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=False , a_=99 , a_=16 , a_=2 , a_=4 , a_=4 , a_="relu" , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.0 , a_=20 , a_=2 , a_=1 , a_=0 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = encoder_layerdrop
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.eos_token_id # Eos Token
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
_UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = prepare_mam_aaa_inputs_dict(a_ , a_ , a_ )
return config, inputs_dict
def _a ( self ) -> Any:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _a ( self ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _a ( self , a_ , a_ ) -> Dict:
_UpperCAmelCase = MaMaaaModel(config=a_ ).get_decoder().to(a_ ).eval()
_UpperCAmelCase = inputs_dict["input_ids"]
_UpperCAmelCase = inputs_dict["attention_mask"]
_UpperCAmelCase = inputs_dict["head_mask"]
# first forward pass
_UpperCAmelCase = model(a_ , attention_mask=a_ , head_mask=a_ , use_cache=a_ )
_UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_UpperCAmelCase = model(a_ , attention_mask=a_ )["last_hidden_state"]
_UpperCAmelCase = model(a_ , attention_mask=a_ , past_key_values=a_ )[
"last_hidden_state"
]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-2 ) )
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = MaMaaaModel(config=a_ ).to(a_ ).eval()
_UpperCAmelCase = model(**a_ )
_UpperCAmelCase = outputs.encoder_last_hidden_state
_UpperCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_encoder()
encoder.save_pretrained(a_ )
_UpperCAmelCase = MaMaaaEncoder.from_pretrained(a_ ).to(a_ )
_UpperCAmelCase = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_decoder()
decoder.save_pretrained(a_ )
_UpperCAmelCase = MaMaaaDecoder.from_pretrained(a_ ).to(a_ )
_UpperCAmelCase = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=a_ , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Optional[int] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
lowercase_ : List[Any] = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
lowercase_ : List[str] = (
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
lowercase_ : Optional[int] = True
lowercase_ : Dict = True
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = MaMaaaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ )
def _a ( self ) -> Any:
self.config_tester.run_common_tests()
def _a ( self ) -> Optional[int]:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a_ )
_UpperCAmelCase , _UpperCAmelCase = model_class.from_pretrained(a_ , output_loading_info=a_ )
self.assertEqual(info["missing_keys"] , [] )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*a_ )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*a_ )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
_UpperCAmelCase = model_class(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = copy.deepcopy(self._prepare_for_class(a_ , a_ ) )
if not self.is_encoder_decoder:
_UpperCAmelCase = inputs["input_ids"]
del inputs["input_ids"]
else:
_UpperCAmelCase = inputs["input_ids"]
_UpperCAmelCase = inputs.get("decoder_input_ids" , a_ )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , a_ )
_UpperCAmelCase = model.get_input_embeddings()
if not self.is_encoder_decoder:
_UpperCAmelCase = wte(a_ )
else:
_UpperCAmelCase = wte(a_ )
_UpperCAmelCase = wte(a_ )
with torch.no_grad():
model(**a_ )[0]
def _a ( self ) -> List[Any]:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = input_dict["input_ids"]
_UpperCAmelCase = input_ids.ne(1 ).to(a_ )
_UpperCAmelCase = MaMaaaForConditionalGeneration(a_ ).eval().to(a_ )
if torch_device == "cuda":
model.half()
model.generate(a_ , attention_mask=a_ )
model.generate(num_beams=4 , do_sample=a_ , early_stopping=a_ , num_return_sequences=3 )
def _long_tensor(tok_lst):
    """simple docstring"""
    return torch.tensor(tok_lst , dtype=torch.long , device=torch_device )

TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> Tuple:
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(a_ )
_UpperCAmelCase = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
_UpperCAmelCase = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
_UpperCAmelCase = prepare_mam_aaa_inputs_dict(model.config , a_ , a_ )
with torch.no_grad():
_UpperCAmelCase = model(**a_ )[0]
_UpperCAmelCase = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , a_ )
# change to expected output here
_UpperCAmelCase = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=a_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=a_ ) )
def _a ( self ) -> Any:
_UpperCAmelCase = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(a_ )
# change to intended input
_UpperCAmelCase = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
_UpperCAmelCase = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
_UpperCAmelCase = prepare_mam_aaa_inputs_dict(model.config , a_ , a_ )
with torch.no_grad():
_UpperCAmelCase = model(**a_ )[0]
_UpperCAmelCase = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , a_ )
# change to expected output here
_UpperCAmelCase = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=a_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=a_ ) )
def _a ( self ) -> int:
_UpperCAmelCase = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(a_ )
_UpperCAmelCase = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
_UpperCAmelCase = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
_UpperCAmelCase = tokenizer(a_ , padding=a_ , return_tensors="pt" )
_UpperCAmelCase = model.generate(
input_ids=dct["input_ids"].to(a_ ) , attention_mask=dct["attention_mask"].to(a_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
_UpperCAmelCase = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
_UpperCAmelCase = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=a_ , skip_special_tokens=a_ )
assert generated == expected_en
| 657 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
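# Fill-mask pipeline behavior tests; the same checks run against tiny PyTorch ("pt") and TensorFlow ("tf") checkpoints.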
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda a_ : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
| 657 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 657 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
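# TypedSequence wraps a plain list so ArrowWriter can steer pa.array type inference:
# `type` forces a type (and errors if incompatible), `try_type` falls back to inference.
# The cases below pin that behavior, including the 2D-array extension type.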
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def _check_output(output, expected_num_chunks):
    """Read an Arrow IPC stream back and verify its chunking and contents."""
    stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
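# For reference, a self-contained sketch (plain pyarrow, no datasets code) of
# the IPC-stream round trip that the helper above verifies:
def _ipc_stream_roundtrip_example():
    sink = pa.BufferOutputStream()
    table = pa.table({"col_1": ["foo", "bar"], "col_2": [1, 2]})
    with pa.ipc.new_stream(sink, table.schema) as ipc_writer:
        ipc_writer.write_table(table)
    read_back = pa.ipc.open_stream(pa.BufferReader(sink.getvalue())).read_all()
    assert read_back.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}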
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
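# Aside (datasets public API): ClassLabel maps between integer ids and label
# names, which is why writing {"labels": 0} above is valid input for the
# "neg"/"pos" feature.
def _class_label_example():
    labels = ClassLabel(names=["neg", "pos"])
    assert labels.str2int("pos") == 1
    assert labels.int2str(0) == "neg"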
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def get_base_dtype(arr_type):
    """Return the innermost (non-list) dtype of a possibly nested list type."""
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value):
    """Replace the first scalar of a (possibly nested) Python list in place."""
    if isinstance(lst[0] , list ):
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
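# Usage sketch for the two helpers above: get_base_dtype unwraps arbitrarily
# nested list types, and change_first_primitive_element_in_list mutates the
# first scalar of a (possibly nested) Python list in place.
def _helpers_example():
    assert get_base_dtype(pa.list_(pa.list_(pa.int64()))) == pa.int64()
    nested = [[1, 2, 3]]
    change_first_primitive_element_in_list(nested, 99)
    assert nested == [[99, 2, 3]]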
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.int64()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
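# End-to-end sketch of the writer API exercised throughout this module:
# write rows, finalize, then read the IPC stream back with plain pyarrow.
def _arrow_writer_example():
    sink = pa.BufferOutputStream()
    with ArrowWriter(stream=sink) as example_writer:
        example_writer.write({"col_1": "foo", "col_2": 1})
        example_writer.write({"col_1": "bar", "col_2": 2})
        n_examples, n_bytes = example_writer.finalize()
    assert n_examples == 2 and n_bytes > 0
    table = pa.ipc.open_stream(pa.BufferReader(sink.getvalue())).read_all()
    assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}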
| 657 | 1 |
"""simple docstring"""
import os
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = len(grid[0] )
_UpperCAmelCase = len(UpperCamelCase__ )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(UpperCamelCase__ ):
for j in range(n_rows - 3 ):
_UpperCAmelCase = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
_UpperCAmelCase = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
_UpperCAmelCase = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
_UpperCAmelCase = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
_UpperCAmelCase = max(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if max_product > largest:
_UpperCAmelCase = max_product
return largest
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = []
with open(os.path.dirname(UpperCamelCase__ ) + "/grid.txt" ) as file:
for line in file:
grid.append(line.strip("\n" ).split(" " ) )
_UpperCAmelCase = [[int(UpperCamelCase__ ) for i in grid[j]] for j in range(len(UpperCamelCase__ ) )]
return largest_product(UpperCamelCase__ )
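# Sanity check on a tiny hand-checkable grid: in this 4x4 grid the best run of
# four adjacent numbers is the main diagonal, 1 * 2 * 3 * 4 = 24.
_TOY_GRID = [
    [1, 0, 0, 0],
    [0, 2, 0, 0],
    [0, 0, 3, 0],
    [0, 0, 0, 4],
]
assert largest_product(_TOY_GRID) == 24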
if __name__ == "__main__":
print(solution())
| 657 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
def test_get_aligned_output_features_output_indices(self) -> None:
    stage_names = ["a", "b", "c"]
    # Defaults to the last layer if both are None
    out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
    self.assertEqual(out_features, ["c"])
    self.assertEqual(out_indices, [2])
    # Out indices set to match out features
    out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
    self.assertEqual(out_features, ["a", "c"])
    self.assertEqual(out_indices, [0, 2])
    # Out features set to match out indices
    out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
    self.assertEqual(out_features, ["a", "c"])
    self.assertEqual(out_indices, [0, 2])
    # Out features selected from negative indices
    out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
    self.assertEqual(out_features, ["a", "c"])
    self.assertEqual(out_indices, [-3, -1])
def test_verify_out_features_out_indices(self) -> None:
    # Stage names must be set
    with self.assertRaises(ValueError):
        verify_out_features_out_indices(["a", "b"], (0, 1), None)
    # Out features must be a list
    with self.assertRaises(ValueError):
        verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
    # Out features must be a subset of stage names
    with self.assertRaises(ValueError):
        verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
    # Out indices must be a list or tuple
    with self.assertRaises(ValueError):
        verify_out_features_out_indices(None, 0, ["a", "b"])
    # Out indices must be a subset of stage names
    with self.assertRaises(ValueError):
        verify_out_features_out_indices(None, (0, 1), ["a"])
    # Out features and out indices must be the same length
    with self.assertRaises(ValueError):
        verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
    # Out features should match out indices
    with self.assertRaises(ValueError):
        verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
    # Out features and out indices should be in order
    with self.assertRaises(ValueError):
        verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
    # Check passes with valid inputs
    verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])
def test_backbone_mixin(self) -> None:
    backbone = BackboneMixin()
    backbone.stage_names = ["a", "b", "c"]
    backbone._out_features = ["a", "c"]
    backbone._out_indices = [0, 2]
    # Check that the output features and indices are set correctly
    self.assertEqual(backbone.out_features, ["a", "c"])
    self.assertEqual(backbone.out_indices, [0, 2])
    # Check out features and indices are updated correctly
    backbone.out_features = ["a", "b"]
    self.assertEqual(backbone.out_features, ["a", "b"])
    self.assertEqual(backbone.out_indices, [0, 1])
    backbone.out_indices = [-3, -1]
    self.assertEqual(backbone.out_features, ["a", "c"])
    self.assertEqual(backbone.out_indices, [-3, -1])
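# Quick standalone sketch mirroring the first assertion above: with no
# explicit out_features/out_indices, the helper falls back to the last stage.
def _alignment_example():
    features, indices = get_aligned_output_features_output_indices(None, None, ["a", "b", "c"])
    assert (features, indices) == (["c"], [2])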
| 657 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 10_24,
'''facebook/mbart-large-cc25''': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Dict = VOCAB_FILES_NAMES
lowercase_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : str = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Any = ['''input_ids''', '''attention_mask''']
lowercase_ : List[int] = []
lowercase_ : List[int] = []
def __init__( self , a_ , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , a_ = None , a_=None , **a_ , ) -> Optional[Any]:
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , cls_token=a_ , pad_token=a_ , mask_token=a_ , tokenizer_file=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a_ ) )
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
_UpperCAmelCase = len(self.sp_model )
_UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(a_ )
}
_UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
_UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_UpperCAmelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_UpperCAmelCase = src_lang if src_lang is not None else "en_XX"
_UpperCAmelCase = self.lang_code_to_id[self._src_lang]
_UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Dict:
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _a ( self ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , a_ ) -> None:
_UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , a_ , a_ = None , a_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
_UpperCAmelCase = [1] * len(self.prefix_tokens )
_UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(a_ )) + suffix_ones
return prefix_ones + ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones
def _a ( self , a_ , a_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self , a_ , a_ = None ) -> List[int]:
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self , a_ , a_ , a_ , a_ , **a_ ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
_UpperCAmelCase = src_lang
_UpperCAmelCase = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
_UpperCAmelCase = self.convert_tokens_to_ids(a_ )
_UpperCAmelCase = tgt_lang_id
return inputs
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self , a_ ) -> List[str]:
return self.sp_model.encode(a_ , out_type=a_ )
def _a ( self , a_ ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(a_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _a ( self , a_ ) -> List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _a ( self , a_ ) -> Tuple:
_UpperCAmelCase = "".join(a_ ).replace(a_ , " " ).strip()
return out_string
def _a ( self , a_ , a_ = None ) -> Tuple[str]:
if not os.path.isdir(a_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_UpperCAmelCase = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
def _a ( self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ) -> BatchEncoding:
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def _a ( self ) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Tuple:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , a_ ) -> None:
_UpperCAmelCase = self.lang_code_to_id[src_lang]
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
def _a ( self , a_ ) -> None:
_UpperCAmelCase = self.lang_code_to_id[lang]
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
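# Hedged usage sketch: in upstream transformers this class is MBartTokenizer,
# and the checkpoint below is the one named in PRETRAINED_VOCAB_FILES_MAP
# above. Left as comments because it requires hub/network access.
#
#   from transformers import MBartTokenizer
#   tok = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   ids = tok("UN Chief says there is no plan to stop chemical weapons in Syria").input_ids
#   # Per set_src_lang_special_tokens above, the sequence ends with
#   # [eos_token_id, lang_code_id("en_XX")].
#   assert ids[-2:] == [tok.eos_token_id, tok.lang_code_to_id["en_XX"]]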
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
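# Behavior sketch: with the lazy pattern above, importing the package is
# cheap; the heavy submodule import only happens on first attribute access.
#
#   from transformers.models import electra
#   electra.ElectraConfig   # triggers _LazyModule.__getattr__, which imports
#                           # configuration_electra on demand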
| 657 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : str = SpeechaTextTokenizer
lowercase_ : List[Any] = False
lowercase_ : int = True
def _a ( self ) -> str:
super().setUp()
_UpperCAmelCase = sp.SentencePieceProcessor()
spm_model.Load(a_ )
_UpperCAmelCase = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(a_ ) )]
_UpperCAmelCase = dict(zip(a_ , range(len(a_ ) ) ) )
_UpperCAmelCase = Path(self.tmpdirname )
save_json(a_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(a_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self ) -> List[str]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> int:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(a_ ) , 1001 )
def _a ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def _a ( self ) -> Tuple:
_UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
_UpperCAmelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(a_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [289, 50, 14, 174, 386] , )
_UpperCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a_ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(a_ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def _a ( self ) -> str:
# fmt: off
_UpperCAmelCase = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class _lowerCAmelCase ( unittest.TestCase ):
checkpoint_name = '''valhalla/s2t_mustc_multilinguial_medium'''
french_text = '''C\'est trop cool'''
spanish_text = '''Esto es genial'''
@classmethod
def setUpClass(cls) -> Any:
    cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
    return cls
def _a ( self ) -> Dict:
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def _a ( self ) -> int:
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def _a ( self ) -> Union[str, Any]:
self.assertIn(a_ , self.tokenizer.all_special_ids )
_UpperCAmelCase = [ES_CODE, 4, 1601, 47, 7647, 2]
_UpperCAmelCase = self.tokenizer.decode(a_ , skip_special_tokens=a_ )
_UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a_ )
self.assertEqual(a_ , a_ )
self.assertNotIn(self.tokenizer.eos_token , a_ )
def _a ( self ) -> str:
_UpperCAmelCase = "fr"
_UpperCAmelCase = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , a_ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def _a ( self ) -> Tuple:
_UpperCAmelCase = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
_UpperCAmelCase = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
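# Usage sketch (hedged; upstream this tokenizer is Speech2TextTokenizer):
# switching tgt_lang swaps the language-code prefix token, as the last two
# tests demonstrate. Requires hub access, so left as comments.
#
#   tok = SpeechaTextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
#   tok.tgt_lang = "fr"
#   assert tok.prefix_tokens == [tok.lang_code_to_id["fr"]]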
| 657 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
def _a ( self ) -> Any:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
_UpperCAmelCase = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
try:
_UpperCAmelCase = float(UpperCamelCase__ )
except ValueError:
raise ValueError("Please enter a valid number" )
_UpperCAmelCase = decimal - int(UpperCamelCase__ )
if fractional_part == 0:
return int(UpperCamelCase__ ), 1
else:
_UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
_UpperCAmelCase , _UpperCAmelCase = numerator / divisor, denominator / divisor
return int(UpperCamelCase__ ), int(UpperCamelCase__ )
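# For comparison, the standard library gets the same result in one step:
from fractions import Fraction

assert Fraction("6.25") == Fraction(25, 4)
assert decimal_to_fraction("6.25") == (25, 4)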
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase = f"Input value of [number={number}] must be an integer"
raise TypeError(UpperCamelCase__ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
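# Quick examples: 5**2 = 25 and 76**2 = 5776 end in 5 and 76 respectively,
# while 7**2 = 49 does not end in 7.
assert is_automorphic_number(5)
assert is_automorphic_number(76)
assert not is_automorphic_number(7)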
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
MODEL_TYPE = '''bart'''
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCAmelCase = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
_UpperCAmelCase = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
_UpperCAmelCase = qar_model.eval()
else:
_UpperCAmelCase , _UpperCAmelCase = (None, None)
if MODEL_TYPE == "bart":
_UpperCAmelCase = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
_UpperCAmelCase = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
_UpperCAmelCase = sas_model.eval()
else:
_UpperCAmelCase , _UpperCAmelCase = make_qa_sas_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCAmelCase = faiss.StandardGpuResources()
_UpperCAmelCase = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"]
_UpperCAmelCase = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , )
_UpperCAmelCase = faiss.IndexFlatIP(128 )
_UpperCAmelCase = faiss.index_cpu_to_gpu(UpperCamelCase__ , 1 , UpperCamelCase__ )
wikiaab_gpu_index_flat.add(UpperCamelCase__ ) # TODO fix for larger GPU
else:
_UpperCAmelCase , _UpperCAmelCase = (None, None)
_UpperCAmelCase = Elasticsearch([{"host": "localhost", "port": "9200"}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = datasets.load_dataset("eli5" , name="LFQA_reddit" )
_UpperCAmelCase = elia["train_eli5"]
_UpperCAmelCase = np.memmap(
"eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128) )
_UpperCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(UpperCamelCase__ )
return (elia_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the n_results most similar ELI5 training examples to the question."""
    question_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D, I = eli5_train_q_index.search(question_rep , n_results )
    nn_examples = [eli5_train[int(i )] for i in I[0]]
    return nn_examples
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__="wiki40b" , UpperCamelCase__="dense" , UpperCamelCase__=10 ):
"""simple docstring"""
if source == "none":
_UpperCAmelCase , _UpperCAmelCase = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCAmelCase , _UpperCAmelCase = query_qa_dense_index(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
_UpperCAmelCase , _UpperCAmelCase = query_es_index(
UpperCamelCase__ , UpperCamelCase__ , index_name="english_wiki40b_snippets_100w" , n_results=UpperCamelCase__ , )
_UpperCAmelCase = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
_UpperCAmelCase = "question: {} context: {}".format(UpperCamelCase__ , UpperCamelCase__ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCamelCase__ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase__ : None),
} )
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
"""simple docstring"""
with torch.no_grad():
_UpperCAmelCase = qa_sas_generate(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , num_answers=1 , num_beams=UpperCamelCase__ , min_len=UpperCamelCase__ , max_len=UpperCamelCase__ , do_sample=UpperCamelCase__ , temp=UpperCamelCase__ , top_p=UpperCamelCase__ , top_k=UpperCamelCase__ , max_input_length=1024 , device="cuda:0" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
__magic_name__ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
__magic_name__ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__magic_name__ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
__magic_name__ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
__magic_name__ = st.sidebar.checkbox('''Demo options''')
if demo_options:
__magic_name__ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
__magic_name__ = action_list.index(action_st)
__magic_name__ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
__magic_name__ = show_type == '''Show full text of passages'''
else:
__magic_name__ = 3
__magic_name__ = True
__magic_name__ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
__magic_name__ = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
__magic_name__ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
__magic_name__ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
__magic_name__ = '''wiki40b'''
__magic_name__ = '''dense'''
__magic_name__ = '''beam'''
__magic_name__ = 2
__magic_name__ = 64
__magic_name__ = 2_56
__magic_name__ = None
__magic_name__ = None
__magic_name__ = st.sidebar.checkbox('''Generation options''')
if generate_options:
__magic_name__ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
__magic_name__ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
__magic_name__ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
__magic_name__ = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
__magic_name__ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__magic_name__ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__magic_name__ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__magic_name__ = None
# start main text
__magic_name__ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
__magic_name__ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__magic_name__ = st.text_input('''Enter your question here:''', '''''')
else:
__magic_name__ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
__magic_name__ , __magic_name__ = make_support(question, source=wiki_source, method='''dense''', n_results=10)
__magic_name__ , __magic_name__ = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
__magic_name__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__magic_name__ = support_list[:10]
__magic_name__ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
__magic_name__ , __magic_name__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__magic_name__ , __magic_name__ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
__magic_name__ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
__magic_name__ = res[1].strip()
if sec_titles == "":
__magic_name__ = '''[{}]({})'''.format(res[0], wiki_url)
else:
__magic_name__ = sec_titles.split(''' & ''')
__magic_name__ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
__magic_name__ = find_nearest_training(question)
__magic_name__ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
__magic_name__ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__magic_name__ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
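# Pattern note (sketch): st.cache with hash_funcs, as used above, tells
# Streamlit how to hash (or skip hashing) argument types it cannot handle,
# such as tensors and tokenizers. Minimal form:
#
#   @st.cache(hash_funcs={torch.Tensor: lambda _: None})
#   def expensive(t):
#       return t.sum().item()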
| 657 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(lowerCamelCase )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> Optional[int]:
super().__init__(*a_ , **a_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(list(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() ) + list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) )
def _a ( self , **a_ ) -> List[str]:
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*a_ , **a_ )
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = load_image(a_ )
_UpperCAmelCase = torch.IntTensor([[image.height, image.width]] )
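# Keep the original (height, width) around so postprocessing can map predicted boxes back to image coordinates.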
_UpperCAmelCase = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
_UpperCAmelCase = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
_UpperCAmelCase = target_size
return inputs
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = model_inputs.pop("target_size" )
_UpperCAmelCase = self.model(**a_ )
_UpperCAmelCase = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
_UpperCAmelCase = model_inputs["bbox"]
return model_outputs
def _a ( self , a_ , a_=0.9 ) -> int:
_UpperCAmelCase = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_UpperCAmelCase , _UpperCAmelCase = target_size[0].tolist()
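# LayoutLM-style boxes are normalized to a 0-1000 grid; unnormalize below rescales them to pixel coordinates.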
def unnormalize(a_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
_UpperCAmelCase , _UpperCAmelCase = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_UpperCAmelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_UpperCAmelCase = [unnormalize(a_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [dict(zip(a_ , a_ ) ) for vals in zip(scores.tolist() , a_ , a_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_UpperCAmelCase = self.image_processor.post_process_object_detection(a_ , a_ , a_ )
_UpperCAmelCase = raw_annotations[0]
_UpperCAmelCase = raw_annotation["scores"]
_UpperCAmelCase = raw_annotation["labels"]
_UpperCAmelCase = raw_annotation["boxes"]
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = [self.model.config.idalabel[label.item()] for label in labels]
_UpperCAmelCase = [self._get_bounding_box(a_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [
dict(zip(a_ , a_ ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def _a ( self , a_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 657 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def merge(UpperCamelCase__ , UpperCamelCase__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(UpperCamelCase__ ) <= 1:
return collection
_UpperCAmelCase = len(UpperCamelCase__ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
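# Usage sketch, using the `merge_sort` name from the recursive call above:
# merge_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]
# merge_sort([]) -> []
# merge_sort([-2, -5, -45]) -> [-45, -5, -2]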
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__magic_name__ = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 | 1 |
"""simple docstring"""
from statistics import mean
import numpy as np
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = 0
# Number of processes finished
_UpperCAmelCase = 0
# Tracks completion status per process:
# 0 = not yet finished, 1 = finished.
_UpperCAmelCase = [0] * no_of_process
# Holds the computed turn around time for each process.
_UpperCAmelCase = [0] * no_of_process
# Sort by arrival time.
_UpperCAmelCase = [burst_time[i] for i in np.argsort(UpperCamelCase__ )]
_UpperCAmelCase = [process_name[i] for i in np.argsort(UpperCamelCase__ )]
arrival_time.sort()
while no_of_process > finished_process_count:
_UpperCAmelCase = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
_UpperCAmelCase = arrival_time[i]
_UpperCAmelCase = 0
# Index of the process selected to run next.
_UpperCAmelCase = 0
# Highest response ratio seen so far in this pass.
_UpperCAmelCase = 0
for i in range(0 , UpperCamelCase__ ):
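# HRRN rule: among ready processes, pick the highest response ratio = (waiting time + burst time) / burst time.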
if finished_process[i] == 0 and arrival_time[i] <= current_time:
_UpperCAmelCase = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
_UpperCAmelCase = temp
_UpperCAmelCase = i
# Calculate the turn around time
_UpperCAmelCase = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Mark the selected process as finished.
_UpperCAmelCase = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = [0] * no_of_process
for i in range(0 , UpperCamelCase__ ):
_UpperCAmelCase = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
__magic_name__ = 5
__magic_name__ = ['''A''', '''B''', '''C''', '''D''', '''E''']
__magic_name__ = [1, 2, 3, 4, 5]
__magic_name__ = [1, 2, 3, 4, 5]
__magic_name__ = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__magic_name__ = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
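# For these inputs the schedule works out to turn around times [1, 2, 4, 7, 11] and
# waiting times [0, 0, 1, 3, 6], i.e. averages of 5.00000 and 2.00000 respectively.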
| 657 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
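# Halfway through, round-trip the scheduler through save/load to check that state_dict restores the schedule.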
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "schedule.bin" )
torch.save(scheduler.state_dict() , UpperCamelCase__ )
_UpperCAmelCase = torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Optional[int]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> str:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase_ : Dict = 10
def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(a_ , self.num_steps )
self.assertListAlmostEqual(
a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps )
self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" )
class _lowerCAmelCase :
def __init__( self , a_ ) -> Union[str, Any]:
_UpperCAmelCase = fn
def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
return self.fn(*a_ , **a_ )
@classmethod
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 657 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
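# Halfway through, round-trip the scheduler through save/load to check that state_dict restores the schedule.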
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "schedule.bin" )
torch.save(scheduler.state_dict() , UpperCamelCase__ )
_UpperCAmelCase = torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Optional[int]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> str:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase_ : Dict = 10
def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(a_ , self.num_steps )
self.assertListAlmostEqual(
a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps )
self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" )
class _lowerCAmelCase :
def __init__( self , a_ ) -> Union[str, Any]:
_UpperCAmelCase = fn
def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
return self.fn(*a_ , **a_ )
@classmethod
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 657 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase ( UpperCamelCase__=None ):
"""simple docstring"""
if subparsers is not None:
_UpperCAmelCase = subparsers.add_parser("test" )
else:
_UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=UpperCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
_UpperCAmelCase = script_name
else:
_UpperCAmelCase = f"--config_file={args.config_file} {script_name}"
_UpperCAmelCase = ["accelerate-launch"] + test_args.split()
_UpperCAmelCase = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = test_command_parser()
_UpperCAmelCase = parser.parse_args()
test_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 657 | 1 |
"""simple docstring"""
from math import sqrt
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, even numbers, and multiples of 3 are not prime
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(UpperCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowerCamelCase ( UpperCamelCase__ = 1_0001 ):
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(UpperCamelCase__ ):
count += 1
while count != nth:
number += 2
if is_prime(UpperCamelCase__ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''')
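# With the default nth = 10001 this prints the 10001st prime, 104743 (Project Euler problem 7).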
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 10 - x * x
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) >= 0:
raise ValueError("Wrong space!" )
_UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
_UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(UpperCamelCase__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) < 0:
_UpperCAmelCase = c
else:
_UpperCAmelCase = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
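# Both intervals bracket the positive root of 10 - x * x, so each call converges to
# roughly 3.162 (sqrt(10) = 3.16227...).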
| 657 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=False ):
"""simple docstring"""
_UpperCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = ""
else:
_UpperCAmelCase = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
_UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = dct.pop(UpperCamelCase__ )
_UpperCAmelCase = val
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = DeiTConfig()
# all deit models have fine-tuned heads
_UpperCAmelCase = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_UpperCAmelCase = 1000
_UpperCAmelCase = "huggingface/label-files"
_UpperCAmelCase = "imagenet-1k-id2label.json"
_UpperCAmelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
_UpperCAmelCase = int(deit_name[-6:-4] )
_UpperCAmelCase = int(deit_name[-3:] )
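# e.g. "vit_deit_base_distilled_patch16_224" -> patch size "16" (chars [-6:-4]) and image size "224" (chars [-3:])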
# size of the architecture
if deit_name[9:].startswith("tiny" ):
_UpperCAmelCase = 192
_UpperCAmelCase = 768
_UpperCAmelCase = 12
_UpperCAmelCase = 3
elif deit_name[9:].startswith("small" ):
_UpperCAmelCase = 384
_UpperCAmelCase = 1536
_UpperCAmelCase = 12
_UpperCAmelCase = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
_UpperCAmelCase = 1024
_UpperCAmelCase = 4096
_UpperCAmelCase = 24
_UpperCAmelCase = 16
# load original model from timm
_UpperCAmelCase = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = timm_model.state_dict()
_UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load HuggingFace model
_UpperCAmelCase = DeiTForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image, prepared by DeiTImageProcessor
_UpperCAmelCase = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_UpperCAmelCase = DeiTImageProcessor(size=UpperCamelCase__ , crop_size=config.image_size )
_UpperCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" )
_UpperCAmelCase = encoding["pixel_values"]
_UpperCAmelCase = model(UpperCamelCase__ )
_UpperCAmelCase = timm_model(UpperCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase__ , outputs.logits , atol=1E-3 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__magic_name__ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 657 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPTaLMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
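# Prepend the projected prefix embeddings to the token embeddings along the sequence dimension.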
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _a ( self , a_ , a_ ) -> torch.Tensor:
return torch.zeros(a_ , self.prefix_length , dtype=torch.intaa , device=a_ )
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
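# Length-normalize the cumulative log-probs so longer beams are not penalized for length alone.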
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 657 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__magic_name__ = pytest.mark.integration
@require_faiss
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> Any:
_UpperCAmelCase = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def _a ( self ) -> List[Any]:
import faiss
_UpperCAmelCase = self._create_dummy_dataset()
_UpperCAmelCase = dset.map(
lambda a_ , a_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=a_ , keep_in_memory=a_ )
_UpperCAmelCase = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
_UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _a ( self ) -> Union[str, Any]:
import faiss
_UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _a ( self ) -> Tuple:
import faiss
_UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=a_ ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
_UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _a ( self ) -> Any:
_UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(a_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _a ( self ) -> Optional[int]:
from elasticsearch import Elasticsearch
_UpperCAmelCase = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
_UpperCAmelCase = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
_UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
_UpperCAmelCase = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=a_ )
_UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> Union[str, Any]:
import faiss
_UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
_UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
_UpperCAmelCase = 1
_UpperCAmelCase , _UpperCAmelCase = index.search(a_ )
self.assertRaises(a_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
_UpperCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
_UpperCAmelCase , _UpperCAmelCase = index.search_batch(a_ )
self.assertRaises(a_ , index.search_batch , queries[0] )
_UpperCAmelCase = [scores[0] for scores in total_scores]
_UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(a_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , a_ )
def _a ( self ) -> Any:
import faiss
_UpperCAmelCase = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
_UpperCAmelCase = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(a_ ):
_UpperCAmelCase = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _a ( self ) -> List[str]:
import faiss
_UpperCAmelCase = faiss.IndexFlat(5 )
_UpperCAmelCase = FaissIndex(custom_index=a_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _a ( self ) -> int:
import faiss
_UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=a_ ) as tmp_file:
index.save(tmp_file.name )
_UpperCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
_UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
_UpperCAmelCase = 1
_UpperCAmelCase , _UpperCAmelCase = index.search(a_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
import faiss
_UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
_UpperCAmelCase = "index.faiss"
_UpperCAmelCase = f"mock://{index_name}"
index.save(UpperCamelCase__ , storage_options=mockfs.storage_options )
_UpperCAmelCase = FaissIndex.load(UpperCamelCase__ , storage_options=mockfs.storage_options )
_UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
_UpperCAmelCase = 1
_UpperCAmelCase , _UpperCAmelCase = index.search(UpperCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
_UpperCAmelCase = Elasticsearch()
_UpperCAmelCase = {"acknowledged": True}
_UpperCAmelCase = ElasticSearchIndex(es_client=a_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
_UpperCAmelCase = "foo"
_UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
_UpperCAmelCase , _UpperCAmelCase = index.search(a_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
_UpperCAmelCase = "foo"
_UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
_UpperCAmelCase , _UpperCAmelCase = index.search(a_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
_UpperCAmelCase = ["foo", "bar", "foobar"]
_UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
_UpperCAmelCase , _UpperCAmelCase = index.search_batch(a_ )
_UpperCAmelCase = [scores[0] for scores in total_scores]
_UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(a_ ) , 0 )
self.assertListEqual([1, 1, 1] , a_ )
# batched queries with timeout
_UpperCAmelCase = ["foo", "bar", "foobar"]
_UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
_UpperCAmelCase , _UpperCAmelCase = index.search_batch(a_ , request_timeout=30 )
_UpperCAmelCase = [scores[0] for scores in total_scores]
_UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(a_ ) , 0 )
self.assertListEqual([1, 1, 1] , a_ )
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase = f"Input value of [number={number}] must be an integer"
raise TypeError(UpperCamelCase__ )
if number < 1:
raise ValueError("Input must be a positive integer" )
return -1 if len(prime_factors(UpperCamelCase__ ) ) % 2 else 1
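# The sign follows the parity of len(prime_factors(number)); there is no square-free check,
# so this agrees with the Moebius function only for square-free inputs.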
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
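        # Mark the batch (and, for multiple choice, the choice) and sequence dimensions
        # as dynamic so the exported ONNX graph accepts variable input shapes.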
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 657 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : List[str] = ['''flax''', '''transformers''']
def __init__( self , *a_ , **a_ ) -> List[str]:
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[int]:
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Tuple:
requires_backends(cls , ["flax", "transformers"] )
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Optional[int] = ['''flax''', '''transformers''']
def __init__( self , *a_ , **a_ ) -> Dict:
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> int:
requires_backends(cls , ["flax", "transformers"] )
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : int = ['''flax''', '''transformers''']
def __init__( self , *a_ , **a_ ) -> Any:
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Dict:
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Tuple:
requires_backends(cls , ["flax", "transformers"] )
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : List[Any] = ['''flax''', '''transformers''']
def __init__( self , *a_ , **a_ ) -> int:
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Any:
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[str]:
requires_backends(cls , ["flax", "transformers"] )
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if (len(UpperCamelCase__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # Convert every pair of hexadecimal digits (= one byte) into an integer,
    # then assemble the results into a bytes object and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
try:
_UpperCAmelCase = float(UpperCamelCase__ )
except ValueError:
raise ValueError("Please enter a valid number" )
_UpperCAmelCase = decimal - int(UpperCamelCase__ )
if fractional_part == 0:
return int(UpperCamelCase__ ), 1
else:
_UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
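        # Reduce the fraction with Euclid's algorithm: loop on remainders until the
        # divisor holds gcd(numerator, denominator), then divide both terms by it.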
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
_UpperCAmelCase , _UpperCAmelCase = numerator / divisor, denominator / divisor
return int(UpperCamelCase__ ), int(UpperCamelCase__ )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=a_ ).to(a_ )
_UpperCAmelCase = AutoTokenizer.from_pretrained("google/mt5-small" )
_UpperCAmelCase = tokenizer("Hello there" , return_tensors="pt" ).input_ids
_UpperCAmelCase = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
_UpperCAmelCase = model(input_ids.to(a_ ) , labels=labels.to(a_ ) ).loss
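        # The loss is the mean per-token cross-entropy, so scaling it by the target
        # length and negating yields the sequence-level log-likelihood reference score.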
_UpperCAmelCase = -(labels.shape[-1] * loss.item())
_UpperCAmelCase = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 657 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__magic_name__ = '''
Human: <<task>>
Assistant: '''
__magic_name__ = '''huggingface-tools/default-prompts'''
__magic_name__ = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="run" ):
"""simple docstring"""
if prompt_or_repo_id is None:
_UpperCAmelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , UpperCamelCase__ ) is not None:
return prompt_or_repo_id
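    # Otherwise treat the value as a repo ID and fetch the template for the
    # requested mode ("chat" or "run") from the prompts dataset on the Hub.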
_UpperCAmelCase = cached_file(
UpperCamelCase__ , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(UpperCamelCase__ , "r" , encoding="utf-8" ) as f:
return f.read()
| 657 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
| 657 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : int = ['''pixel_values''']
def __init__( self , a_ = True , a_ = 32 , a_=PILImageResampling.BILINEAR , a_ = True , **a_ , ) -> None:
_UpperCAmelCase = do_resize
_UpperCAmelCase = do_rescale
_UpperCAmelCase = size_divisor
_UpperCAmelCase = resample
super().__init__(**a_ )
def _a ( self , a_ , a_ , a_ , a_ = None , **a_ ) -> np.ndarray:
_UpperCAmelCase , _UpperCAmelCase = get_image_size(a_ )
# Rounds the height and width down to the closest multiple of size_divisor
_UpperCAmelCase = height // size_divisor * size_divisor
_UpperCAmelCase = width // size_divisor * size_divisor
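        # Both sides are floored to a multiple of size_divisor (presumably so strided
        # backbone layers divide the resized input exactly).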
_UpperCAmelCase = resize(a_ , (new_h, new_w) , resample=a_ , data_format=a_ , **a_ )
return image
def _a ( self , a_ , a_ , a_ = None , **a_ ) -> np.ndarray:
return rescale(image=a_ , scale=a_ , data_format=a_ , **a_ )
def _a ( self , a_ , a_ = None , a_ = None , a_=None , a_ = None , a_ = None , a_ = ChannelDimension.FIRST , **a_ , ) -> BatchFeature:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = size_divisor if size_divisor is not None else self.size_divisor
_UpperCAmelCase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing" )
_UpperCAmelCase = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError("Invalid image(s)" )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(a_ ) for img in images]
if do_resize:
_UpperCAmelCase = [self.resize(a_ , size_divisor=a_ , resample=a_ ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(a_ , scale=1 / 255 ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(a_ , a_ ) for image in images]
_UpperCAmelCase = {"pixel_values": images}
return BatchFeature(data=a_ , tensor_type=a_ )
| 657 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
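            # A plain dict round-trips through the backbone's own config class,
            # selected via its "model_type" key.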
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 657 | 1 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , split=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if issubclass(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase = parquet_path
elif issubclass(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase = [parquet_path]
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=("train",) ):
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
for split in splits:
_UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = ParquetDatasetReader({"train": parquet_path} , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if split:
_UpperCAmelCase = {split: parquet_path}
else:
_UpperCAmelCase = "train"
_UpperCAmelCase = {"train": parquet_path, "test": parquet_path}
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / "foo.parquet" )
assert writer.write() > 0
_UpperCAmelCase = pq.ParquetFile(tmp_path / "foo.parquet" )
_UpperCAmelCase = pf.read()
assert dataset.data.table == output_table
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(shared_datadir / "test_image_rgb.jpg" )
_UpperCAmelCase = {"image": [image_path]}
_UpperCAmelCase = Features({"image": Image()} )
_UpperCAmelCase = Dataset.from_dict(UpperCamelCase__ , features=UpperCamelCase__ )
_UpperCAmelCase = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / "foo.parquet" )
assert writer.write() > 0
_UpperCAmelCase = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
_UpperCAmelCase = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=UpperCamelCase__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
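# get_writer_batch_size caps the Parquet row-group size only for media features
# (Image/Audio, even nested); plain value columns get None, i.e. the writer default.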
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
assert get_writer_batch_size(UpperCamelCase__ ) == expected
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Dict = '''vivit'''
def __init__( self , a_=224 , a_=32 , a_=[2, 16, 16] , a_=3 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu_fast" , a_=0.0 , a_=0.0 , a_=0.02 , a_=1e-06 , a_=True , **a_ , ) -> List[str]:
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = num_frames
_UpperCAmelCase = tubelet_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = qkv_bias
super().__init__(**a_ )
| 657 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
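        # After the swaps every box satisfies x0 <= x1 and y0 <= y1.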
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
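        # Two dummy text tokens paired with a (1, 2, 4) tensor of bounding boxes.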
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
| 657 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__magic_name__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
warnings.warn(
"The preprocess method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor.preprocess instead" , UpperCamelCase__ , )
if isinstance(UpperCamelCase__ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase__ , PIL.Image.Image ):
_UpperCAmelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
_UpperCAmelCase , _UpperCAmelCase = image[0].size
_UpperCAmelCase , _UpperCAmelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
_UpperCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
_UpperCAmelCase = np.concatenate(UpperCamelCase__ , axis=0 )
_UpperCAmelCase = np.array(UpperCamelCase__ ).astype(np.floataa ) / 255.0
_UpperCAmelCase = image.transpose(0 , 3 , 1 , 2 )
_UpperCAmelCase = 2.0 * image - 1.0
_UpperCAmelCase = torch.from_numpy(UpperCamelCase__ )
elif isinstance(image[0] , torch.Tensor ):
_UpperCAmelCase = torch.cat(UpperCamelCase__ , dim=0 )
return image
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if isinstance(UpperCamelCase__ , torch.Tensor ):
return mask
elif isinstance(UpperCamelCase__ , PIL.Image.Image ):
_UpperCAmelCase = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
_UpperCAmelCase , _UpperCAmelCase = mask[0].size
_UpperCAmelCase , _UpperCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_UpperCAmelCase = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
_UpperCAmelCase = np.concatenate(UpperCamelCase__ , axis=0 )
_UpperCAmelCase = mask.astype(np.floataa ) / 255.0
_UpperCAmelCase = 0
_UpperCAmelCase = 1
_UpperCAmelCase = torch.from_numpy(UpperCamelCase__ )
elif isinstance(mask[0] , torch.Tensor ):
_UpperCAmelCase = torch.cat(UpperCamelCase__ , dim=0 )
return mask
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : UNetaDModel
lowercase_ : RePaintScheduler
def __init__( self , a_ , a_ ) -> Tuple:
super().__init__()
self.register_modules(unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__( self , a_ , a_ , a_ = 250 , a_ = 0.0 , a_ = 10 , a_ = 10 , a_ = None , a_ = "pil" , a_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
_UpperCAmelCase = image
_UpperCAmelCase = _preprocess_image(a_ )
_UpperCAmelCase = original_image.to(device=self.device , dtype=self.unet.dtype )
_UpperCAmelCase = _preprocess_mask(a_ )
_UpperCAmelCase = mask_image.to(device=self.device , dtype=self.unet.dtype )
_UpperCAmelCase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(a_ , a_ ) and len(a_ ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(a_ )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
_UpperCAmelCase = original_image.shape
_UpperCAmelCase = randn_tensor(a_ , generator=a_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(a_ , a_ , a_ , self.device )
_UpperCAmelCase = eta
_UpperCAmelCase = self.scheduler.timesteps[0] + 1
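        # t_last starts one step past the first timestep so the very first iteration
        # takes the denoising branch below.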
_UpperCAmelCase = generator[0] if isinstance(a_ , a_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
_UpperCAmelCase = self.unet(a_ , a_ ).sample
# compute previous image: x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(a_ , a_ , a_ , a_ , a_ , a_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
_UpperCAmelCase = self.scheduler.undo_step(a_ , a_ , a_ )
_UpperCAmelCase = t
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
| 657 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
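        # The two lexicographically first vocab entries make deterministic targets.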
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda a_ : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
        # The target list contains duplicates, so we cannot output more unique
        # predictions than there are unique targets
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
| 657 | 1 |
"""simple docstring"""
from collections import namedtuple
__magic_name__ = namedtuple('''from_to''', '''from_ to''')
__magic_name__ = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.001, 10_00),
'''kilolitre''': from_to(1, 1),
    '''gallon''': from_to(0.003785, 264.172),
    '''cubicyard''': from_to(0.76455, 1.30795),
    '''cubicfoot''': from_to(0.0283168, 35.3147),
'''cup''': from_to(0.000236588, 4226.75),
}
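# Each entry holds the factor into cubic metres (from_) and out of cubic metres (to),
# so any conversion is value * from_[source] * to[target].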
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
+ ", ".join(UpperCamelCase__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
+ ", ".join(UpperCamelCase__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , pa.Buffer ) else pa.memory_map(UpperCamelCase__ )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if pa.types.is_list(UpperCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if isinstance(lst[0] , UpperCamelCase__ ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase__ )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 657 | 1 |
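A minimal sketch of the ArrowWriter round trip that the tests above exercise repeatedly, assuming the `datasets` and `pyarrow` packages are installed.
import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

stream = pa.BufferOutputStream()
with ArrowWriter(stream=stream) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

# Read the stream back with pyarrow's IPC reader, as _check_output does above.
table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
print(num_examples, table.to_pydict())  # 2 {'col_1': ['foo', 'bar'], 'col_2': [1, 2]}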
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 |
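A minimal sketch of the greedy-decoding integration test above, assuming the "openai-gpt" checkpoint can be downloaded.
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
print(tokenizer.decode(output_ids[0]))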
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _a ( self ) -> Optional[int]:
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _a ( self ) -> int:
_UpperCAmelCase = BackboneMixin()
_UpperCAmelCase = ["a", "b", "c"]
_UpperCAmelCase = ["a", "c"]
_UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 657 | 1 |
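A short sketch of the alignment helper tested above: given the stage names and only one of (out_features, out_indices), the other is derived to match.
from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

stage_names = ["stem", "stage1", "stage2", "stage3"]
features, indices = get_aligned_output_features_output_indices(None, [1, 3], stage_names)
print(features, indices)  # ['stage1', 'stage3'] [1, 3]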
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> str:
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=a_ , )
assert hasattr(self , "env" )
def _a ( self , a_=1 ) -> Union[str, Any]:
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-single" , instance_count=a_ , instance_type=self.instance_type , debugger_hook_config=a_ , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def _a ( self , a_ ) -> Optional[Any]:
TrainingJobAnalytics(a_ ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
def _a ( self ) -> List[str]:
# create estimator
_UpperCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from the SageMaker job; this includes starting, preprocessing, and stopping
_UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , a_ )
| 657 |
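A minimal sketch of launching the same training job outside the test harness, assuming valid AWS credentials; the role ARN and image URI below are placeholders, not real values.
from sagemaker.huggingface import HuggingFace

estimator = HuggingFace(
    entry_point="run_glue.py",
    source_dir="./examples/pytorch/text-classification",
    role="arn:aws:iam::111122223333:role/sagemaker-role",  # placeholder role ARN
    image_uri="<huggingface-training-image-uri>",  # placeholder, mirrors env.image_uri above
    instance_count=1,
    instance_type="ml.g4dn.xlarge",
    py_version="py36",
    hyperparameters={"model_name_or_path": "distilbert-base-cased"},
)
estimator.fit()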
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
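The `_LazyModule` pattern above keeps `import transformers` cheap: submodules are resolved only on first attribute access. A quick way to observe it, assuming torch is installed:
import transformers

cls = transformers.ElectraForMaskedLM  # the electra modeling module is imported here, not at package import
print(cls.__module__)  # transformers.models.electra.modeling_electra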
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Tuple:
_UpperCAmelCase = inspect.getfile(accelerate.test_utils )
_UpperCAmelCase = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
_UpperCAmelCase = test_metrics
@require_cpu
def _a ( self ) -> Optional[int]:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def _a ( self ) -> Dict:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def _a ( self ) -> Union[str, Any]:
self.test_metrics.main()
@require_multi_gpu
def _a ( self ) -> Dict:
print(f"Found {torch.cuda.device_count()} devices." )
_UpperCAmelCase = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(a_ , env=os.environ.copy() )
| 657 |
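A minimal sketch of accelerate's debug_launcher, which the CPU tests above use to emulate a multi-process launch in a single process; the training function here is a stand-in for test_metrics.main.
from accelerate import debug_launcher

def training_fn():
    from accelerate import Accelerator
    accelerator = Accelerator()
    print(f"process {accelerator.process_index} of {accelerator.num_processes}")

debug_launcher(training_fn, num_processes=2)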
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
def _a ( self ) -> Any:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
_UpperCAmelCase = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 1 |
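A short usage sketch of the tokenizer behaviour asserted above, assuming the "moussaKam/mbarthez" checkpoint is reachable and torch is installed.
from transformers import BarthezTokenizerFast

tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
batch = tokenizer(
    ["A long paragraph for summarization."], padding=True, return_tensors="pt"
)
print(batch.input_ids.shape)
print(tokenizer.convert_ids_to_tokens(batch.input_ids[0].tolist()))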
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : int = ['''input_features''']
def __init__( self , a_=80 , a_=16000 , a_=160 , a_=30 , a_=400 , a_=0.0 , a_=False , **a_ , ) -> List[Any]:
super().__init__(
feature_size=a_ , sampling_rate=a_ , padding_value=a_ , return_attention_mask=a_ , **a_ , )
_UpperCAmelCase = n_fft
_UpperCAmelCase = hop_length
_UpperCAmelCase = chunk_length
_UpperCAmelCase = chunk_length * sampling_rate
_UpperCAmelCase = self.n_samples // hop_length
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a_ , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=a_ , norm="slaney" , mel_scale="slaney" , )
def _a ( self , a_ ) -> np.ndarray:
_UpperCAmelCase = spectrogram(
a_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_UpperCAmelCase = log_spec[:, :-1]
_UpperCAmelCase = np.maximum(a_ , log_spec.max() - 8.0 )
_UpperCAmelCase = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _a ( a_ , a_ , a_ = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
_UpperCAmelCase = np.array(a_ , np.intaa )
_UpperCAmelCase = []
for vector, length in zip(a_ , attention_mask.sum(-1 ) ):
_UpperCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
_UpperCAmelCase = padding_value
normed_input_values.append(a_ )
else:
_UpperCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , a_ , a_ = True , a_ = None , a_ = None , a_ = None , a_ = "max_length" , a_ = None , a_ = None , a_ = None , **a_ , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_UpperCAmelCase = isinstance(a_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
_UpperCAmelCase = is_batched_numpy or (
isinstance(a_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a_ , np.ndarray ):
_UpperCAmelCase = np.asarray(a_ , dtype=np.floataa )
elif isinstance(a_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase = [np.asarray([raw_speech] ).T]
_UpperCAmelCase = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_UpperCAmelCase = self.pad(
a_ , padding=a_ , max_length=max_length if max_length else self.n_samples , truncation=a_ , pad_to_multiple_of=a_ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_UpperCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_UpperCAmelCase = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_UpperCAmelCase = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_UpperCAmelCase = [self._np_extract_fbank_features(a_ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a_ ):
_UpperCAmelCase = [np.asarray(a_ , dtype=np.floataa ) for feature in input_features]
else:
_UpperCAmelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_UpperCAmelCase = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_UpperCAmelCase = padded_inputs.convert_to_tensors(a_ )
return padded_inputs
def _a ( self ) -> Dict[str, Any]:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 657 |
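A numeric sketch of the log-mel post-processing above: values are floored to within 8 decades of the maximum, then shifted and scaled roughly into [-1, 1].
import numpy as np

log_spec = np.array([-12.0, -6.0, -2.0, 0.0])          # toy log10-mel values
log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # floor at max - 8 -> [-8, -6, -2, 0]
log_spec = (log_spec + 4.0) / 4.0                      # -> [-1.0, -0.5, 0.5, 1.0]
print(log_spec)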
"""simple docstring"""
def is_automorphic_number( number ):
"""simple docstring"""
if not isinstance(number , int ):
_UpperCAmelCase = f"Input value of [number={number}] must be an integer"
raise TypeError(_UpperCAmelCase )
if number < 0:
return False
number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
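Quick checks of the predicate above: a number is automorphic when its square ends in the number itself.
print(is_automorphic_number(25))  # True  (25**2 = 625 ends in 25)
print(is_automorphic_number(76))  # True  (76**2 = 5776 ends in 76)
print(is_automorphic_number(7))   # False (7**2 = 49)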
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''LayoutLMv2FeatureExtractor''']
__magic_name__ = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(lowerCamelCase )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> Optional[int]:
super().__init__(*a_ , **a_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _a ( self , **a_ ) -> List[str]:
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*a_ , **a_ )
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = load_image(a_ )
_UpperCAmelCase = torch.IntTensor([[image.height, image.width]] )
_UpperCAmelCase = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
_UpperCAmelCase = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
_UpperCAmelCase = target_size
return inputs
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = model_inputs.pop("target_size" )
_UpperCAmelCase = self.model(**a_ )
_UpperCAmelCase = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
_UpperCAmelCase = model_inputs["bbox"]
return model_outputs
def _a ( self , a_ , a_=0.9 ) -> int:
_UpperCAmelCase = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_UpperCAmelCase , _UpperCAmelCase = target_size[0].tolist()
def unnormalize(a_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
_UpperCAmelCase , _UpperCAmelCase = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_UpperCAmelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_UpperCAmelCase = [unnormalize(a_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [dict(zip(a_ , a_ ) ) for vals in zip(scores.tolist() , a_ , a_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_UpperCAmelCase = self.image_processor.post_process_object_detection(a_ , a_ , a_ )
_UpperCAmelCase = raw_annotations[0]
_UpperCAmelCase = raw_annotation["scores"]
_UpperCAmelCase = raw_annotation["labels"]
_UpperCAmelCase = raw_annotation["boxes"]
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = [self.model.config.idalabel[label.item()] for label in labels]
_UpperCAmelCase = [self._get_bounding_box(a_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [
dict(zip(a_ , a_ ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def _a ( self , a_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 657 | 1 |
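A minimal sketch of running this pipeline, assuming a DETR checkpoint such as "facebook/detr-resnet-50" is reachable; the image path is a placeholder.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
for pred in detector("street_scene.jpg", threshold=0.9):
    # Each prediction has the keys built in postprocess above: score, label, box.
    print(pred["label"], round(pred["score"], 3), pred["box"])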
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger()
@dataclass
class _lowerCAmelCase :
lowercase_ : nn.Module
lowercase_ : List[nn.Module] = field(default_factory=lowerCamelCase )
lowercase_ : list = field(default_factory=lowerCamelCase )
def _a ( self , a_ , a_ , a_ ) -> str:
_UpperCAmelCase = len(list(m.modules() ) ) == 1 or isinstance(a_ , nn.Convad ) or isinstance(a_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a_ )
def __call__( self , a_ ) -> Optional[int]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a_ )
[x.remove() for x in self.handles]
return self
@property
def _a ( self ) -> Optional[int]:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda a_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _lowerCAmelCase :
lowercase_ : nn.Module
lowercase_ : nn.Module
lowercase_ : int = 0
lowercase_ : List = field(default_factory=lowerCamelCase )
lowercase_ : List = field(default_factory=lowerCamelCase )
def __call__( self , a_ ) -> Tuple:
_UpperCAmelCase = Tracker(self.dest )(a_ ).parametrized
_UpperCAmelCase = Tracker(self.src )(a_ ).parametrized
_UpperCAmelCase = list(filter(lambda a_ : type(a_ ) not in self.src_skip , a_ ) )
_UpperCAmelCase = list(filter(lambda a_ : type(a_ ) not in self.dest_skip , a_ ) )
if len(a_ ) != len(a_ ):
raise Exception(
f"Numbers of operations are different. Source module has {len(a_ )} operations while"
f" destination module has {len(a_ )}." )
for dest_m, src_m in zip(a_ , a_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"Transfered from={src_m} to={dest_m}" )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True ):
"""simple docstring"""
print(f"Converting {name}..." )
with torch.no_grad():
_UpperCAmelCase = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval()
_UpperCAmelCase = ResNetForImageClassification(UpperCamelCase__ ).eval()
_UpperCAmelCase = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ )
_UpperCAmelCase = torch.randn((1, 3, 224, 224) )
module_transfer(UpperCamelCase__ )
assert torch.allclose(from_model(UpperCamelCase__ ) , our_model(UpperCamelCase__ ).logits ), "The model logits don't match the original one."
_UpperCAmelCase = f"resnet{'-'.join(name.split('resnet' ) )}"
print(UpperCamelCase__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=UpperCamelCase__ , )
# we can use the convnext one
_UpperCAmelCase = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=UpperCamelCase__ , )
print(f"Pushed {checkpoint_name}" )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True ):
"""simple docstring"""
_UpperCAmelCase = "imagenet-1k-id2label.json"
_UpperCAmelCase = 1000
_UpperCAmelCase = (1, num_labels)
_UpperCAmelCase = "huggingface/label-files"
_UpperCAmelCase = num_labels
_UpperCAmelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
_UpperCAmelCase = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
_UpperCAmelCase = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert; it must be one of the supported resnet* architectures,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__magic_name__ = parser.parse_args()
__magic_name__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 657 |
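A standalone toy version of the Tracker idea above (recording the leaf modules hit during a forward pass); trace_leaves is a hypothetical helper written here for illustration, not part of the script.
import torch
import torch.nn as nn

def trace_leaves(module, x):
    traced = []
    hooks = [
        m.register_forward_hook(lambda mod, inp, out: traced.append(mod))
        for m in module.modules()
        if len(list(m.modules())) == 1  # leaf modules only
    ]
    module(x)
    for h in hooks:
        h.remove()
    return traced

toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
print([type(m).__name__ for m in trace_leaves(toy, torch.randn(1, 3, 8, 8))])
# ['Conv2d', 'BatchNorm2d', 'ReLU']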
"""simple docstring"""
def merge_sort( collection ):
"""simple docstring"""
def merge(left , right ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(collection ) <= 1:
return collection
mid = len(collection ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input('''Enter numbers separated by a comma:\n''').strip()
unsorted = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 | 1 |
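Quick checks of the merge sort above; it is stable and runs in O(n log n).
print(merge_sort([5, 3, 1, 4, 2]))  # [1, 2, 3, 4, 5]
print(merge_sort([]))               # []
print(merge_sort([7]))              # [7]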
"""simple docstring"""
def hexagonal_numbers( length ):
"""simple docstring"""
if length <= 0 or not isinstance(length , int ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 |
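Expected output of the two calls above, spelled out as asserts (h(n) = n * (2n - 1) for n = 0 .. length - 1).

assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]
assert hexagonal_numbers(10) == [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]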
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def _a ( self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Any:
        if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 | 1 |
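A standalone sketch of the greedy-generation path the slow test above exercises; it assumes the public `openai-gpt` checkpoint is reachable.

import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
model.eval()

input_ids = tokenizer("the president is", return_tensors="pt").input_ids
with torch.no_grad():
    output_ids = model.generate(input_ids, do_sample=False, max_length=20)
print(tokenizer.decode(output_ids[0]))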
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__magic_name__ = logging.get_logger(__name__)
def make_batched(videos):
    """Coerce a single image, a list of frames, or a batch of videos into a list of videos."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Dict = ['''pixel_values''']
def __init__( self , a_ = True , a_ = None , a_ = PILImageResampling.BILINEAR , a_ = True , a_ = None , a_ = True , a_ = 1 / 255 , a_ = True , a_ = True , a_ = None , a_ = None , **a_ , ) -> None:
super().__init__(**a_ )
_UpperCAmelCase = size if size is not None else {"shortest_edge": 256}
_UpperCAmelCase = get_size_dict(a_ , default_to_square=a_ )
_UpperCAmelCase = crop_size if crop_size is not None else {"height": 224, "width": 224}
_UpperCAmelCase = get_size_dict(a_ , param_name="crop_size" )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = resample
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = offset
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _a ( self , a_ , a_ , a_ = PILImageResampling.BILINEAR , a_ = None , **a_ , ) -> np.ndarray:
_UpperCAmelCase = get_size_dict(a_ , default_to_square=a_ )
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(a_ , size["shortest_edge"] , default_to_square=a_ )
elif "height" in size and "width" in size:
_UpperCAmelCase = (size["height"], size["width"])
else:
raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
def _a ( self , a_ , a_ , a_ = None , **a_ , ) -> np.ndarray:
_UpperCAmelCase = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(a_ , size=(size["height"], size["width"]) , data_format=a_ , **a_ )
def _a ( self , a_ , a_ , a_ = True , a_ = None , **a_ , ) -> Union[str, Any]:
_UpperCAmelCase = image.astype(np.float32 )
if offset:
_UpperCAmelCase = image - (scale / 2)
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def _a ( self , a_ , a_ , a_ , a_ = None , **a_ , ) -> np.ndarray:
return normalize(a_ , mean=a_ , std=a_ , data_format=a_ , **a_ )
def _a ( self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_UpperCAmelCase = to_numpy_array(a_ )
if do_resize:
_UpperCAmelCase = self.resize(image=a_ , size=a_ , resample=a_ )
if do_center_crop:
_UpperCAmelCase = self.center_crop(a_ , size=a_ )
if do_rescale:
_UpperCAmelCase = self.rescale(image=a_ , scale=a_ , offset=a_ )
if do_normalize:
_UpperCAmelCase = self.normalize(image=a_ , mean=a_ , std=a_ )
_UpperCAmelCase = to_channel_dimension_format(a_ , a_ )
return image
def _a ( self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = ChannelDimension.FIRST , **a_ , ) -> PIL.Image.Image:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = offset if offset is not None else self.offset
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(a_ , default_to_square=a_ )
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(a_ , param_name="crop_size" )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_UpperCAmelCase = make_batched(a_ )
_UpperCAmelCase = [
[
self._preprocess_image(
image=a_ , do_resize=a_ , size=a_ , resample=a_ , do_center_crop=a_ , crop_size=a_ , do_rescale=a_ , rescale_factor=a_ , offset=a_ , do_normalize=a_ , image_mean=a_ , image_std=a_ , data_format=a_ , )
for img in video
]
for video in videos
]
_UpperCAmelCase = {"pixel_values": videos}
return BatchFeature(data=a_ , tensor_type=a_ )
| 657 |
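A rough usage sketch for a processor with the interface above. The concrete class name is an assumption (any subclass exposing this preprocess signature behaves the same), and shapes are illustrative.

import numpy as np
from transformers import VivitImageProcessor  # assumed concrete subclass

# Eight synthetic RGB frames standing in for a short video clip.
frames = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]

processor = VivitImageProcessor()
batch = processor(frames, return_tensors="np")
print(batch["pixel_values"].shape)  # e.g. (1, 8, 3, 224, 224) after resize + center crop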
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """Step the scheduler `num_steps` times and record the learning rates."""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Like unwrap_schedule, but round-trip the scheduler state dict halfway through."""
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Optional[int]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> str:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase_ : Dict = 10
def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(a_ , self.num_steps )
self.assertListAlmostEqual(
a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps )
self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" )
class _lowerCAmelCase :
def __init__( self , a_ ) -> Union[str, Any]:
_UpperCAmelCase = fn
def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
return self.fn(*a_ , **a_ )
@classmethod
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 657 | 1 |
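A self-contained sketch of the warmup-then-linear-decay behavior the table above encodes; the values mirror the get_linear_schedule_with_warmup row.

import torch
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Linear(50, 50)
optimizer = torch.optim.AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=2, num_training_steps=10
)

lrs = []
for _ in range(10):
    lrs.append(scheduler.get_last_lr()[0])
    scheduler.step()
print(lrs)  # ~[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]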
"""simple docstring"""
import functools
def min_distance(word1, word2):
    """Minimum edit distance (Levenshtein) between two words, via memoized recursion."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def _min_distance(index1, index2) -> int:
        # first word exhausted - insert everything left in the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # second word exhausted - delete everything left in the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # 0 if the current letters match
        return min(
            1 + _min_distance(index1 + 1, index2),  # delete from word1
            1 + _min_distance(index1, index2 + 1),  # insert into word1
            diff + _min_distance(index1 + 1, index2 + 1),  # substitute
        )

    return _min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
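Two classic checks for min_distance above, assuming it is in scope.

assert min_distance("horse", "ros") == 3       # horse -> rorse -> rose -> ros
assert min_distance("intention", "execution") == 5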
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """Build (or attach) the argument parser for the `accelerate test` command."""
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    """Launch the bundled distributed-training smoke test via accelerate-launch."""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 657 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
__magic_name__ = parser.parse_args()
if args.model_type == "bert":
__magic_name__ = BertForMaskedLM.from_pretrained(args.model_name)
__magic_name__ = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
__magic_name__ = model.state_dict()
__magic_name__ = {}
for w in ["word_embeddings", "position_embeddings"]:
__magic_name__ = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
__magic_name__ = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
__magic_name__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__magic_name__ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
__magic_name__ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
__magic_name__ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
__magic_name__ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
__magic_name__ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
__magic_name__ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
__magic_name__ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
__magic_name__ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
__magic_name__ = state_dict['''cls.predictions.decoder.weight''']
__magic_name__ = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
__magic_name__ = state_dict[f'''cls.predictions.transform.dense.{w}''']
__magic_name__ = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 657 |
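A hedged sketch of consuming the checkpoint the script above writes. The 6-layer DistilBERT-style student and its key layout are assumptions, not prescribed here, so strict=False is used when loading.

import torch
from transformers import DistilBertConfig, DistilBertForMaskedLM

state_dict = torch.load("serialization_dir/tf_bert-base-uncased_0247911.pth")
student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))  # assumed student shape
missing, unexpected = student.load_state_dict(state_dict, strict=False)
print(f"missing: {len(missing)}, unexpected: {len(unexpected)}")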
"""simple docstring"""
def equation(x):
    """The function whose root we search for: f(x) = 10 - x^2."""
    return 10 - x * x
def bisection(a, b):
    """Find a root of `equation` in [a, b] by bisection, to within 0.01."""
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is a root
        if equation(c) == 0.0:
            break
        # Keep the half-interval where the sign changes
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 657 | 1 |
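The root of 10 - x^2 on [0, 6] is sqrt(10) ≈ 3.162, so the loop above should land within its 0.01 tolerance.

import math

root = bisection(0, 6)
assert abs(root - math.sqrt(10)) < 0.01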
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__magic_name__ = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__magic_name__ = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
__magic_name__ = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(s):
    """Lowercase text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    """1 if the normalized prediction matches the normalized gold answer, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """Percentage of predictions that exactly match at least one reference."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = [rgram for rgrams in rgramslist for rgram in rgrams]
_UpperCAmelCase = Counter(UpperCamelCase__ )
_UpperCAmelCase = Counter(UpperCamelCase__ )
_UpperCAmelCase = Counter()
for sgram, scount in sgramcounter.items():
_UpperCAmelCase = scount * numref
_UpperCAmelCase = Counter(UpperCamelCase__ )
_UpperCAmelCase = Counter()
for cgram, ccount in cgramcounter.items():
_UpperCAmelCase = ccount * numref
# KEEP
_UpperCAmelCase = sgramcounter_rep & cgramcounter_rep
_UpperCAmelCase = keepgramcounter_rep & rgramcounter
_UpperCAmelCase = sgramcounter_rep & rgramcounter
_UpperCAmelCase = 0
_UpperCAmelCase = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_UpperCAmelCase = 1
_UpperCAmelCase = 1
if len(UpperCamelCase__ ) > 0:
_UpperCAmelCase = keeptmpscorea / len(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_UpperCAmelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_UpperCAmelCase = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_UpperCAmelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_UpperCAmelCase = sgramcounter_rep - cgramcounter_rep
_UpperCAmelCase = delgramcounter_rep - rgramcounter
_UpperCAmelCase = sgramcounter_rep - rgramcounter
_UpperCAmelCase = 0
_UpperCAmelCase = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_UpperCAmelCase = 1
if len(UpperCamelCase__ ) > 0:
_UpperCAmelCase = deltmpscorea / len(UpperCamelCase__ )
# ADDITION
_UpperCAmelCase = set(UpperCamelCase__ ) - set(UpperCamelCase__ )
_UpperCAmelCase = set(UpperCamelCase__ ) & set(UpperCamelCase__ )
_UpperCAmelCase = set(UpperCamelCase__ ) - set(UpperCamelCase__ )
_UpperCAmelCase = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_UpperCAmelCase = 1
_UpperCAmelCase = 1
if len(UpperCamelCase__ ) > 0:
_UpperCAmelCase = addtmpscore / len(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
_UpperCAmelCase = addtmpscore / len(UpperCamelCase__ )
_UpperCAmelCase = 0
if addscore_precision > 0 or addscore_recall > 0:
_UpperCAmelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = len(UpperCamelCase__ )
_UpperCAmelCase = ssent.split(" " )
_UpperCAmelCase = csent.split(" " )
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
for rsent in rsents:
_UpperCAmelCase = rsent.split(" " )
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
ragramslist.append(UpperCamelCase__ )
for i in range(0 , len(UpperCamelCase__ ) - 1 ):
if i < len(UpperCamelCase__ ) - 1:
_UpperCAmelCase = ragrams[i] + " " + ragrams[i + 1]
ragrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 2:
_UpperCAmelCase = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2]
ragrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 3:
_UpperCAmelCase = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3]
ragrams.append(UpperCamelCase__ )
ragramslist.append(UpperCamelCase__ )
ragramslist.append(UpperCamelCase__ )
ragramslist.append(UpperCamelCase__ )
for i in range(0 , len(UpperCamelCase__ ) - 1 ):
if i < len(UpperCamelCase__ ) - 1:
_UpperCAmelCase = sagrams[i] + " " + sagrams[i + 1]
sagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 2:
_UpperCAmelCase = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2]
sagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 3:
_UpperCAmelCase = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3]
sagrams.append(UpperCamelCase__ )
for i in range(0 , len(UpperCamelCase__ ) - 1 ):
if i < len(UpperCamelCase__ ) - 1:
_UpperCAmelCase = cagrams[i] + " " + cagrams[i + 1]
cagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 2:
_UpperCAmelCase = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2]
cagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 3:
_UpperCAmelCase = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3]
cagrams.append(UpperCamelCase__ )
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_UpperCAmelCase = sum([delascore, delascore, delascore, delascore] ) / 4
_UpperCAmelCase = sum([addascore, addascore, addascore, addascore] ) / 4
_UpperCAmelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def normalize(sentence, lowercase=True, tokenizer="13a", return_str=True):
    """Normalize and tokenize a sentence, mirroring sacrebleu's preprocessing."""
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources, predictions, references):
    """Corpus-level SARI: average SARIsent over normalized sources, predictions and references."""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False):
    """Corpus BLEU via sacrebleu, transposing references to per-position lists."""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions, transformed_references, smooth_method=smooth_method, smooth_value=smooth_value, force=force, lowercase=lowercase, use_effective_order=use_effective_order, )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def _a ( self , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = {}
result.update({"sari": compute_sari(sources=a_ , predictions=a_ , references=a_ )} )
result.update({"sacrebleu": compute_sacrebleu(predictions=a_ , references=a_ )} )
result.update({"exact": compute_em(predictions=a_ , references=a_ )} )
return result
| 657 |
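A quick check of the normalization used by the exact-match path above (lowercase, strip punctuation, drop articles, squeeze whitespace), assuming the helpers are in scope.

assert normalize_answer("The  Quick, Brown fox!") == "quick brown fox"
assert compute_exact("A dog.", "dog") == 1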
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPTaLMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _a ( self , a_ , a_ ) -> torch.Tensor:
        return torch.zeros(a_ , self.prefix_length , dtype=torch.int64 , device=a_ )
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 657 | 1 |
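A dimension-only sketch of the prefix pathway above: encoder features are projected into the GPT-2 embedding width and prepended to the token embeddings before the language model runs. All sizes here are illustrative, and the two-step encode/decode projection is collapsed into one linear layer.

import torch

prefix_length, prefix_inner_dim, n_embd = 10, 512, 768
project = torch.nn.Linear(prefix_inner_dim, n_embd)  # stand-in for encode/decode_prefix

prefix_features = torch.randn(1, prefix_length, prefix_inner_dim)
token_embeds = torch.randn(1, 5, n_embd)

inputs_embeds = torch.cat((project(prefix_features), token_embeds), dim=1)
print(inputs_embeds.shape)  # torch.Size([1, 15, 768])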
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
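With the lazy-module pattern above, importing the package stays cheap; heavy submodules only materialize on first attribute access, for example:

import transformers

# Touching the attribute triggers the real import behind the scenes.
config = transformers.GPTNeoXConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
print(type(config).__name__)  # GPTNeoXConfig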
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Dict = '''van'''
def __init__( self , a_=224 , a_=3 , a_=[7, 3, 3, 3] , a_=[4, 2, 2, 2] , a_=[64, 128, 320, 512] , a_=[3, 3, 12, 3] , a_=[8, 8, 4, 4] , a_="gelu" , a_=0.02 , a_=1e-6 , a_=1e-2 , a_=0.0 , a_=0.0 , **a_ , ) -> Dict:
super().__init__(**a_ )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = strides
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_ratios
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = dropout_rate
| 657 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 657 | 1 |
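The dynamic-axes mapping above drives ONNX export. A hedged sketch follows; the `ConvBertOnnxConfig` name and import path are assumptions based on the usual pairing of these classes.

from transformers import ConvBertConfig
from transformers.models.convbert.configuration_convbert import ConvBertOnnxConfig  # assumed path

onnx_config = ConvBertOnnxConfig(ConvBertConfig(), task="multiple-choice")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}), ...])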
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , ) -> Tuple:
_UpperCAmelCase = parent
_UpperCAmelCase = 13
_UpperCAmelCase = 7
_UpperCAmelCase = 30
_UpperCAmelCase = self.seq_length + self.mem_len
_UpperCAmelCase = 15
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 99
_UpperCAmelCase = [10, 50, 80]
_UpperCAmelCase = 32
_UpperCAmelCase = 32
_UpperCAmelCase = 4
_UpperCAmelCase = 8
_UpperCAmelCase = 128
_UpperCAmelCase = 2
_UpperCAmelCase = 2
_UpperCAmelCase = None
_UpperCAmelCase = 1
_UpperCAmelCase = 0
_UpperCAmelCase = 3
_UpperCAmelCase = self.vocab_size - 1
_UpperCAmelCase = 0.01
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _a ( self ) -> Optional[int]:
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _a ( self , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = TFTransfoXLModel(a_ )
_UpperCAmelCase , _UpperCAmelCase = model(a_ ).to_tuple()
_UpperCAmelCase = {"input_ids": input_ids_a, "mems": mems_a}
_UpperCAmelCase , _UpperCAmelCase = model(a_ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _a ( self , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = TFTransfoXLLMHeadModel(a_ )
_UpperCAmelCase , _UpperCAmelCase = model(a_ ).to_tuple()
_UpperCAmelCase = {"input_ids": input_ids_a, "labels": lm_labels}
_UpperCAmelCase , _UpperCAmelCase = model(a_ ).to_tuple()
_UpperCAmelCase , _UpperCAmelCase = model([input_ids_a, mems_a] ).to_tuple()
_UpperCAmelCase = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
_UpperCAmelCase , _UpperCAmelCase = model(a_ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _a ( self , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = TFTransfoXLForSequenceClassification(a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs
_UpperCAmelCase = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Optional[Any] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowercase_ : List[str] = () if is_tf_available() else ()
lowercase_ : Any = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : List[str] = False
lowercase_ : Dict = False
    def _a ( self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ) -> List[str]:
        if pipeline_test_case_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = TFTransfoXLModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , d_embed=37 )
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
self.model_tester.set_seed()
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a_ )
def _a ( self ) -> Optional[Any]:
self.model_tester.set_seed()
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a_ )
def _a ( self ) -> str:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(a_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_UpperCAmelCase = model.get_output_embeddings()
assert isinstance(a_ , tf.keras.layers.Layer )
_UpperCAmelCase = model.get_bias()
assert name is None
else:
_UpperCAmelCase = model.get_output_embeddings()
assert x is None
_UpperCAmelCase = model.get_bias()
assert name is None
def _a ( self ) -> Optional[Any]:
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def _a ( self ) -> Tuple:
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFTransfoXLModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
def _a ( self ) -> int:
pass
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
        _UpperCAmelCase = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_UpperCAmelCase = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_UpperCAmelCase = model.generate(a_ , max_length=200 , do_sample=a_ )
self.assertListEqual(output_ids[0].numpy().tolist() , a_ )
| 657 |
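The integration test in the block above pins down deterministic generation end to end: a fixed prompt of token ids is fed to `TFTransfoXLLMHeadModel.generate` with `do_sample=False` (greedy decoding), and the produced ids are compared against a frozen expected sequence. A minimal sketch of the same pattern, using only the public `transformers` generation API (the short prompt here is an illustrative placeholder, not the test's pinned sequence):

import tensorflow as tf
from transformers import TFTransfoXLLMHeadModel

model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# Illustrative short prompt; the real test uses a much longer exact sequence.
input_ids = tf.convert_to_tensor([[33, 1297, 2, 1, 1009]], dtype=tf.int32)
# do_sample=False makes decoding greedy and therefore reproducible, so the
# generated ids can be asserted against a hard-coded list.
output_ids = model.generate(input_ids, max_length=20, do_sample=False)
print(output_ids[0].numpy().tolist())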
"""simple docstring"""
def base16_encode ( data ):
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode ( data ):
    """simple docstring"""
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
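A quick round trip shows how the two helpers above fit together; this sketch assumes the restored names `base16_encode`/`base16_decode` and the RFC 3548 uppercase alphabet they enforce:

# bytes -> uppercase hex string -> bytes
encoded = base16_encode(b"Hello World!")
print(encoded)  # 48656C6C6F20576F726C6421
assert base16_decode(encoded) == b"Hello World!"

# An odd number of digits or non-uppercase-hex characters raises ValueError.
for bad in ("ABC", "48656c"):
    try:
        base16_decode(bad)
    except ValueError as err:
        print(err)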
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers ( src_layers , dest_layers , layers_to_copy ) -> None:
    """simple docstring"""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), f"{len(dest_layers )} != {len(layers_to_copy )}"
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy ( n_student , n_teacher ):
    """simple docstring"""
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}" )
        return list(range(n_student ) )
def get_layers_to_supervise ( n_student , n_teacher ):
    """simple docstring"""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers ( teacher , save_path = "student" , e = None , d = None , copy_first_teacher_layers=False , e_layers_to_copy=None , d_layers_to_copy=None , **extra_config_kwargs , ):
    """simple docstring"""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher , str ):
        AutoTokenizer.from_pretrained(teacher ).save_pretrained(save_path )  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher ).eval()
    else:
        assert isinstance(teacher , PreTrainedModel ), f"teacher must be a model or string got type {type(teacher )}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e , teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
    except AttributeError:  # T5
        if hasattr(teacher.config , "num_encoder_layers" ):
            teacher_e , teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e , teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config , "num_encoder_layers" ):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs )
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs )
    student = AutoModelForSeq2SeqLM.from_config(student_cfg )
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict() , strict=False )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy , d_layers_to_copy = list(range(e ) ), list(range(d ) )
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}" )
        student.save_pretrained(save_path )
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e , teacher_e )
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d , teacher_d )
    try:
        if hasattr(
            teacher , "prophetnet" ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , d_layers_to_copy )
        else:
            copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , d_layers_to_copy )
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block , student.encoder.block , e_layers_to_copy )
        copy_layers(teacher.decoder.block , student.decoder.block , d_layers_to_copy )
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path )
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 657 |
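The layer-picking logic above is easy to sanity-check in isolation: combinations present in `LAYERS_TO_COPY` return the hardcoded picks (which keep the first and last teacher layers), and unknown combinations fall back to the first `n_student` layers with a warning. A few spot checks derived from the tables above:

# Hardcoded picks from LAYERS_TO_COPY:
assert pick_layers_to_copy(n_student=3, n_teacher=12) == [0, 6, 11]
assert pick_layers_to_copy(n_student=4, n_teacher=16) == [0, 5, 10, 15]

# No entry for a 10-layer teacher: falls back to [0, 1] (and warns).
assert pick_layers_to_copy(n_student=2, n_teacher=10) == [0, 1]

# Teacher layers used to supervise a 3-layer student of a 12-layer teacher:
assert get_layers_to_supervise(n_student=3, n_teacher=12) == [3, 7, 11]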
"""simple docstring"""
def decimal_to_fraction ( decimal ):
    """simple docstring"""
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError("Please enter a valid number" )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split("." )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        divisor , dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 1 |
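The conversion works in two steps: scale the decimal by a power of ten to clear the fractional digits, then divide numerator and denominator by their GCD, which the `while` loop computes with Euclid's algorithm. Worked through for 6.25 (assuming the restored name `decimal_to_fraction`):

# 6.25 has 2 fractional digits, so scale by 10**2:
#   numerator = 625, denominator = 100
# Euclid: 625 % 100 = 25, 100 % 25 = 0  ->  gcd = 25
#   625 / 25 = 25, 100 / 25 = 4
assert decimal_to_fraction(6.25) == (25, 4)
assert decimal_to_fraction("1.5") == (3, 2)
assert decimal_to_fraction(2) == (2, 1)  # no fractional part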
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> Union[str, Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> Union[str, Any]:
        self.model_tester = FlaxDistilBertModelTester(self )
@slow
    def _a ( self ) -> str:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
@slow
    def _a ( self ) -> List[str]:
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 657 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card ( model_card_dir , src_lang , tgt_lang , model_name ):
    """simple docstring"""
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , "README.md" )
    print(f"Generating {path}" )
    with open(path , "w" , encoding="utf-8" ) as f:
        f.write(_UpperCAmelCase )  # the model card text assembled in the f-string above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 | 1 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
    def __init__( self , controlnets ) -> None:
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def _a ( self , sample , timestep , encoder_hidden_states , controlnet_cond , conditioning_scale , class_labels = None , timestep_cond = None , attention_mask = None , cross_attention_kwargs = None , guess_mode = False , return_dict = True , ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples , mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def _a ( self , save_directory , is_main_process = True , save_function = None , safe_serialization = False , variant = None , ) -> str:
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
@classmethod
    def _a ( cls , pretrained_model_path , **kwargs ) -> Any:
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets )} controlnets loaded from {pretrained_model_path}." )
        if len(controlnets ) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}." )
        return cls(controlnets )
| 657 |
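The save/load pair above relies purely on a directory-suffix convention: net 0 is written to `save_directory`, net 1 to `save_directory_1`, and so on, and `from_pretrained` walks the same sequence until a directory is missing. A minimal sketch of that scan, independent of diffusers (the paths are illustrative):

import os

def scan_controlnet_dirs(root):
    """Collect root, root_1, root_2, ... until the first missing directory."""
    paths, idx, candidate = [], 0, root
    while os.path.isdir(candidate):
        paths.append(candidate)
        idx += 1
        candidate = root + f"_{idx}"
    return paths

# With ./mydirectory/controlnet and ./mydirectory/controlnet_1 on disk,
# scan_controlnet_dirs("./mydirectory/controlnet") returns both, in order.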
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
    _backends = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
| 657 | 1 |
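The dummy-object pattern above lets a library expose a class name even when optional backends are missing, deferring the ImportError from import time to first use. A stripped-down sketch of the idea; the `requires_backends` helper below is a stand-in, not the real diffusers implementation, and the scheduler class name is illustrative:

import importlib.util

def requires_backends(obj, backends):
    # Raise only when the dummy is actually constructed or accessed.
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backends: {missing}")

class DummyObject(type):
    # Metaclass hook so classmethod-style access also triggers the check.
    def __getattr__(cls, key):
        requires_backends(cls.__name__, cls._backends)

class SomeSdeScheduler(metaclass=DummyObject):  # illustrative name
    _backends = ["torch", "torchsde"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)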
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ) -> List[str]:
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ) -> Union[str, Any]:
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
    image_processing_class = VivitImageProcessor if is_vision_available() else None
    def setUp( self ) -> Dict:
        self.image_processor_tester = VivitImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> List[Any]:
        return self.image_processor_tester.prepare_image_processor_dict()
    def _a ( self ) -> List[str]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def _a ( self ) -> List[Any]:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
    def _a ( self ) -> str:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , Image.Image )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_videos = image_processing(video_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def _a ( self ) -> Union[str, Any]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , np.ndarray )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_videos = image_processing(video_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def _a ( self ) -> Union[str, Any]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , torch.Tensor )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_videos = image_processing(video_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 657 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
    model_type = '''upernet'''
    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ) -> List[Any]:
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def _a ( self ) -> int:
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 657 | 1 |
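One detail worth noting in `to_dict` above: because the backbone is itself a config object, serialization has to recurse, replacing the nested config with its own dict before the composite can be dumped as JSON. A toy illustration of the pattern in plain Python (class names here are made up):

import copy
import json

class ToyBackboneConfig:
    model_type = "resnet"
    def __init__(self, depth=50):
        self.depth = depth
    def to_dict(self):
        d = copy.deepcopy(self.__dict__)
        d["model_type"] = self.model_type
        return d

class ToyUperNetConfig:
    model_type = "upernet"
    def __init__(self, backbone_config, hidden_size=512):
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()  # recurse
        output["model_type"] = self.__class__.model_type
        return output

cfg = ToyUperNetConfig(ToyBackboneConfig())
print(json.dumps(cfg.to_dict(), indent=2))  # fully JSON-serializable now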
"""simple docstring"""
def interpolation_search ( sorted_collection , item ):
    """simple docstring"""
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion ( sorted_collection , item , left , right ):
    """simple docstring"""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , left )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted ( collection ):
    """simple docstring"""
    if collection != sorted(collection ):
        raise ValueError("Collection must be ascending sorted" )
    return True
if __name__ == "__main__":
import sys
    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f'''{target} found at position: {result}''')
    else:
        print('''Not found''')
| 657 |
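What distinguishes interpolation search from binary search is the probe position: instead of the midpoint, it linearly interpolates where `item` should sit between the boundary values, giving O(log log n) probes on uniformly distributed data. A quick check of both variants above (restored names assumed):

collection = [10, 30, 40, 45, 50, 66, 77, 93]

assert interpolation_search(collection, 66) == 5
assert interpolation_search(collection, 11) is None  # absent value

# The recursive variant takes explicit bounds.
assert interpolation_search_by_recursion(collection, 66, 0, len(collection) - 1) == 5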
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 | 1 |
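`_LazyModule` defers the real import until an attribute is first touched, which keeps `import transformers` cheap even with hundreds of submodules. The underlying mechanism is a module-level `__getattr__` (PEP 562); below is a minimal standalone sketch of that idea, not the transformers implementation:

# mypackage/__init__.py -- lazy re-exports via PEP 562
import importlib

_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Only called for names not already defined at module level.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")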
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
    _backends = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
| 657 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester :
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=3 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs( self ) -> Dict:
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> Tuple:
        model = LayoutLMvaModel(config=config )
        model.to(torch_device )
        model.eval()
        # text + image
        result = model(input_ids , pixel_values=pixel_values )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model(pixel_values=pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> Optional[Any]:
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> Union[str, Any]:
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> Dict:
        model = LayoutLMvaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ) -> bool:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
    def setUp( self ) -> Union[str, Any]:
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> List[str]:
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(v , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )
        return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
    def _a ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def _a ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def _a ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def _a ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def _a ( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
    def _a ( self ) -> List[str]:
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ) -> List[Any]:
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
    def _a ( self ) -> Union[str, Any]:
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 657 | 1 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log ( folder_path ):
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , "git_log.json" ) , "w" ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params ( params ):
    """simple docstring"""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info("Initializing GPUs" )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"] )
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"] )
        params.global_rank = int(os.environ["RANK"] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ["N_NODES"] )
        assert params.node_id == int(os.environ["NODE_RANK"] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
    logger.info(PREFIX + "Node ID        : %i" % params.node_id )
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank )
    logger.info(PREFIX + "World size     : %i" % params.world_size )
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node )
    logger.info(PREFIX + "Master         : %s" % str(params.is_master ) )
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node ) )
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu ) )
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname() )
    # set GPU device
    torch.cuda.set_device(params.local_rank )
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed" )
        torch.distributed.init_process_group(
            init_method="env://" , backend="nccl" , )
def set_seed ( args ):
    """simple docstring"""
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed )
| 657 |
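The rank bookkeeping above reduces to two integer divisions: with `WORLD_SIZE` total processes and `N_GPU_NODE` processes per node, the node count and the node a given global rank lives on follow directly. Worked numbers for a hypothetical 2-node, 16-process job:

world_size = 16         # WORLD_SIZE: total number of processes
n_gpu_per_node = 8      # N_GPU_NODE: processes per node
global_rank = 11        # RANK of this process

n_nodes = world_size // n_gpu_per_node     # 2
node_id = global_rank // n_gpu_per_node    # 1  (ranks 8..15 live on node 1)
local_rank = global_rank % n_gpu_per_node  # 3  (the script reads this from its args instead)

# The same invariant the function asserts:
assert 0 <= local_rank <= global_rank < world_size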
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
    def get_test_pipeline( self , model , tokenizer , processor ) -> Optional[Any]:
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
# If we use the most probable targets, and filter differently, we should still
# have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda x : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
# The target list contains duplicates, so we can't output more
# predictions than there are unique targets
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
| 657 | 1 |
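For orientation, a minimal usage sketch of the fill-mask behaviour the assertions above check; the checkpoint matches the one used in the @require_torch test, while the example targets are arbitrary assumptions.

# Hedged usage sketch of the fill-mask pipeline exercised by the tests above.
from transformers import pipeline

fill_masker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base")
mask = fill_masker.tokenizer.mask_token

# top_k bounds the number of predictions; each one is a dict with
# "sequence", "score", "token" and "token_str" keys, as asserted above.
for pred in fill_masker(f"This is a {mask}", top_k=2):
    print(pred["token_str"], pred["score"])

# `targets` restricts the candidates but keeps the same output shape.
# The target strings here are arbitrary assumptions.
print(fill_masker(f"This is a {mask}", targets=["test", "example"]))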
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[int] = '''facebook/bart-large-mnli'''
lowercase_ : int = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
lowercase_ : Any = '''text_classifier'''
lowercase_ : List[Any] = AutoTokenizer
lowercase_ : Any = AutoModelForSequenceClassification
lowercase_ : Optional[int] = ['''text''', ['''text''']]
lowercase_ : List[str] = ['''text''']
def _a ( self ) -> List[str]:
super().setup()
_UpperCAmelCase = self.model.config
_UpperCAmelCase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
_UpperCAmelCase = int(idx )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = labels
return self.pre_processor(
[text] * len(a_ ) , [f"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , )
def _a ( self , a_ ) -> List[Any]:
_UpperCAmelCase = outputs.logits
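# NOTE: index 2 is assumed to be the entailment logit here; setup() above derives self.entailment_id for the same purpose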
_UpperCAmelCase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 657 |
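The tool above implements zero-shot classification via NLI: one (text, "This example is {label}") pair per candidate label, then pick the label with the highest entailment logit. A standalone sketch of the same trick, using the tool's default checkpoint:

# Sketch of zero-shot classification through NLI entailment, mirroring the tool above.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

checkpoint = "facebook/bart-large-mnli"  # same default checkpoint as the tool
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

text = "I really enjoyed this movie"
labels = ["positive", "negative"]

# One premise/hypothesis pair per candidate label.
inputs = tokenizer(
    [text] * len(labels),
    [f"This example is {label}" for label in labels],
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (num_labels, 3)

# For this checkpoint, config.id2label maps index 2 to "entailment".
print(labels[torch.argmax(logits[:, 2]).item()])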
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , pa.Buffer ) else pa.memory_map(UpperCamelCase__ )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if pa.types.is_list(UpperCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if isinstance(lst[0] , UpperCamelCase__ ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase__ )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 657 | 1 |
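A compact sketch of the writer round trip these tests keep repeating: write two examples into an in-memory Arrow stream, finalize, and read them back.

# Sketch: the ArrowWriter round trip exercised by the tests above.
import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

output = pa.BufferOutputStream()
with ArrowWriter(stream=output) as writer:  # schema is inferred from the rows
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

# The buffer contains an Arrow record-batch stream.
table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}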
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class _lowerCAmelCase ( lowerCamelCase ):
@require_torch
def _a ( self ) -> Union[str, Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it is too late to do that from inside pytest - so we
# change it while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
_UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
_UpperCAmelCase = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(a_ )
BertModel.from_pretrained(a_ )
BertTokenizer.from_pretrained(a_ )
pipeline(task="fill-mask" , model=a_ )
# baseline - just load from_pretrained with normal network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
_UpperCAmelCase = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCAmelCase = "1"
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _a ( self ) -> Any:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
_UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
_UpperCAmelCase = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(a_ )
BertModel.from_pretrained(a_ )
BertTokenizer.from_pretrained(a_ )
pipeline(task="fill-mask" , model=a_ )
# baseline - just load from_pretrained with normal network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
_UpperCAmelCase = self.get_env()
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _a ( self ) -> int:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it is too late to do that from inside pytest - so we
# change it while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
_UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
_UpperCAmelCase = self.get_env()
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCAmelCase = "1"
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = "\nfrom transformers import pipeline\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
_UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
_UpperCAmelCase = self.get_env()
_UpperCAmelCase = "1"
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def _a ( self ) -> Tuple:
_UpperCAmelCase = "\nfrom transformers import AutoModel\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
_UpperCAmelCase = self.get_env()
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCAmelCase = "1"
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 657 |
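All of the tests above share one harness: compose a small script from load/run/mock segments, execute it in a fresh interpreter so TRANSFORMERS_OFFLINE is read at import time, and assert on the exit code and stdout. A stripped-down sketch:

# Minimal sketch of the subprocess harness used above. TRANSFORMERS_OFFLINE
# must be set before `transformers` is imported, hence the fresh interpreter.
import os
import subprocess
import sys

snippet = "import transformers; print('success')"
env = dict(os.environ, TRANSFORMERS_OFFLINE="1")
result = subprocess.run(
    [sys.executable, "-c", snippet], env=env, check=False, capture_output=True
)
assert result.returncode == 0, result.stderr
assert "success" in result.stdout.decode()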
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _a ( self ) -> Optional[int]:
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _a ( self ) -> int:
_UpperCAmelCase = BackboneMixin()
_UpperCAmelCase = ["a", "b", "c"]
_UpperCAmelCase = ["a", "c"]
_UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 657 | 1 |
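For reference, the alignment rule the first test pins down can be restated in a few lines; this is a simplified re-derivation from the assertions above, not the library implementation.

# Simplified restatement of the out_features/out_indices alignment rule tested above.
def align(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        # Default to the last stage when neither is given.
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        # Resolve names from indices; negative indices are kept as-is.
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        return list(out_features), [stage_names.index(f) for f in out_features]
    return list(out_features), list(out_indices)

assert align(None, None, ["a", "b", "c"]) == (["c"], [2])
assert align(None, [-3, -1], ["a", "b", "c"]) == (["a", "c"], [-3, -1])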
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 657 |
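A quick instantiation sketch; the printed values follow directly from the defaults in the signature above.

# Sketch: the defaults above yield a BERT-sized ConvBERT configuration.
from transformers import ConvBertConfig

config = ConvBertConfig()
print(config.hidden_size)        # 768
print(config.head_ratio)         # 2
print(config.conv_kernel_size)   # 9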
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
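The _LazyModule assignment at the bottom means importing the package only records the name lists above; the heavy submodules load on first attribute access. A toy sketch of the same idea using a module-level __getattr__ (PEP 562), independent of transformers:

# Toy sketch of lazy attribute loading (PEP 562); transformers' _LazyModule
# implements the same idea as a module subclass.
import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_name_to_module = {n: m for m, names in _import_structure.items() for n in names}

def __getattr__(name):
    if name in _name_to_module:
        # Import the owning module only when the attribute is first requested.
        return getattr(importlib.import_module(_name_to_module[name]), name)
    raise AttributeError(name)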
"""simple docstring"""
import cva
import numpy as np
class _lowerCAmelCase :
def __init__( self , a_ , a_ ) -> List[str]:
if k in (0.04, 0.06):
_UpperCAmelCase = k
_UpperCAmelCase = window_size
else:
raise ValueError("invalid k value" )
def __str__( self ) -> str:
return str(self.k )
def _a ( self , a_ ) -> tuple[cva.Mat, list[list[int]]]:
_UpperCAmelCase = cva.imread(a_ , 0 )
_UpperCAmelCase , _UpperCAmelCase = img.shape
_UpperCAmelCase = []
_UpperCAmelCase = img.copy()
_UpperCAmelCase = cva.cvtColor(a_ , cva.COLOR_GRAY2RGB )
_UpperCAmelCase , _UpperCAmelCase = np.gradient(a_ )
_UpperCAmelCase = dx**2
_UpperCAmelCase = dy**2
_UpperCAmelCase = dx * dy
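# NOTE: this hard-coded k shadows the value validated in __init__ and is the one used in the response below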
_UpperCAmelCase = 0.04
_UpperCAmelCase = self.window_size // 2
for y in range(a_ , h - offset ):
for x in range(a_ , w - offset ):
_UpperCAmelCase = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_UpperCAmelCase = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_UpperCAmelCase = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_UpperCAmelCase = (wxx * wyy) - (wxy**2)
_UpperCAmelCase = wxx + wyy
_UpperCAmelCase = det - k * (trace**2)
# Threshold on the corner response; can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
__magic_name__ = HarrisCorner(0.04, 3)
__magic_name__ , __magic_name__ = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
| 657 |
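The double loop above evaluates the standard Harris response R = det(M) - k * trace(M)^2 over a windowed structure tensor. A vectorized sketch of the same response map, assuming a uniform box window (scipy's uniform_filter averages rather than sums, which only rescales R by a constant):

# Vectorized sketch of the Harris response computed pixel-by-pixel above.
import numpy as np
from scipy.ndimage import uniform_filter

def harris_response(img: np.ndarray, window_size: int = 3, k: float = 0.04) -> np.ndarray:
    dy, dx = np.gradient(img.astype(float))
    # Windowed structure-tensor entries (means, i.e. sums up to a constant factor).
    wxx = uniform_filter(dx * dx, window_size)
    wyy = uniform_filter(dy * dy, window_size)
    wxy = uniform_filter(dx * dy, window_size)
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2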
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
def _a ( self ) -> Any:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
_UpperCAmelCase = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def merge(UpperCamelCase__ , UpperCamelCase__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(UpperCamelCase__ ) <= 1:
return collection
_UpperCAmelCase = len(UpperCamelCase__ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase = f"Input value of [number={number}] must be an integer"
raise TypeError(UpperCamelCase__ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
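The digit-by-digit loop above tests whether n squared ends in n, i.e. whether n is an automorphic number. A one-line demo of the same predicate:

# Demo: automorphic numbers below 1000, matching the check above.
print([n for n in range(1000) if str(n * n).endswith(str(n))])
# [0, 1, 5, 6, 25, 76, 376, 625]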
"""simple docstring"""
import random
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = a[left_index]
_UpperCAmelCase = left_index + 1
for j in range(left_index + 1 , UpperCamelCase__ ):
if a[j] < pivot:
_UpperCAmelCase , _UpperCAmelCase = a[i], a[j]
i += 1
_UpperCAmelCase , _UpperCAmelCase = a[i - 1], a[left_index]
return i - 1
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if left < right:
_UpperCAmelCase = random.randint(UpperCamelCase__ , right - 1 )
_UpperCAmelCase , _UpperCAmelCase = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_UpperCAmelCase = partition(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
quick_sort_random(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
UpperCamelCase__ , pivot_index + 1 , UpperCamelCase__ ) # recursive quicksort to the right of the pivot point
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = input("Enter numbers separated by a comma:\n" ).strip()
_UpperCAmelCase = [int(UpperCamelCase__ ) for item in user_input.split("," )]
quick_sort_random(UpperCamelCase__ , 0 , len(UpperCamelCase__ ) )
print(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 657 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(lowerCamelCase )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> Optional[int]:
super().__init__(*a_ , **a_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _a ( self , **a_ ) -> List[str]:
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*a_ , **a_ )
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = load_image(a_ )
_UpperCAmelCase = torch.IntTensor([[image.height, image.width]] )
_UpperCAmelCase = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
_UpperCAmelCase = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
_UpperCAmelCase = target_size
return inputs
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = model_inputs.pop("target_size" )
_UpperCAmelCase = self.model(**a_ )
_UpperCAmelCase = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
_UpperCAmelCase = model_inputs["bbox"]
return model_outputs
def _a ( self , a_ , a_=0.9 ) -> int:
_UpperCAmelCase = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_UpperCAmelCase , _UpperCAmelCase = target_size[0].tolist()
def unnormalize(a_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
_UpperCAmelCase , _UpperCAmelCase = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_UpperCAmelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_UpperCAmelCase = [unnormalize(a_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [dict(zip(a_ , a_ ) ) for vals in zip(scores.tolist() , a_ , a_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_UpperCAmelCase = self.image_processor.post_process_object_detection(a_ , a_ , a_ )
_UpperCAmelCase = raw_annotations[0]
_UpperCAmelCase = raw_annotation["scores"]
_UpperCAmelCase = raw_annotation["labels"]
_UpperCAmelCase = raw_annotation["boxes"]
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = [self.model.config.idalabel[label.item()] for label in labels]
_UpperCAmelCase = [self._get_bounding_box(a_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [
dict(zip(a_ , a_ ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def _a ( self , a_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 657 | 1 |
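A hedged usage sketch of the pipeline class above; the checkpoint and image URL are assumptions, and the output keys match the ones assembled in postprocess.

# Hypothetical usage sketch of the object-detection pipeline defined above.
from transformers import pipeline

detector = pipeline(task="object-detection", model="facebook/detr-resnet-50")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
for pred in detector(url, threshold=0.9):
    # Each prediction: {"score": float, "label": str,
    #                   "box": {"xmin": int, "ymin": int, "xmax": int, "ymax": int}}
    print(pred["label"], round(pred["score"], 3), pred["box"])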
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--model_ckpt" , type=UpperCamelCase__ , default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs" , type=UpperCamelCase__ , default=5 )
parser.add_argument("--batch_size" , type=UpperCamelCase__ , default=6 )
parser.add_argument("--gradient_accumulation_steps" , type=UpperCamelCase__ , default=1 )
parser.add_argument("--freeze" , type=UpperCamelCase__ , default=UpperCamelCase__ )
parser.add_argument("--learning_rate" , type=UpperCamelCase__ , default=5E-4 )
parser.add_argument("--seed" , type=UpperCamelCase__ , default=0 )
parser.add_argument("--lr_scheduler_type" , type=UpperCamelCase__ , default="cosine" )
parser.add_argument("--num_warmup_steps" , type=UpperCamelCase__ , default=10 )
parser.add_argument("--weight_decay" , type=UpperCamelCase__ , default=0.01 )
parser.add_argument("--output_dir" , type=UpperCamelCase__ , default="./results" )
return parser.parse_args()
__magic_name__ = load('''accuracy''')
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = eval_pred
_UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
return metric.compute(predictions=UpperCamelCase__ , references=UpperCamelCase__ )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , a_ ) -> None:
super().__init__()
_UpperCAmelCase = trainer
def _a ( self , a_ , a_ , a_ , **a_ ) -> Any:
if control.should_evaluate:
_UpperCAmelCase = deepcopy(a_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = get_args()
set_seed(args.seed )
_UpperCAmelCase = load_dataset("codeparrot/codecomplex" , split="train" )
_UpperCAmelCase = dataset.train_test_split(test_size=0.2 )
_UpperCAmelCase = train_test["test"].train_test_split(test_size=0.5 )
_UpperCAmelCase = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase = tokenizer.eos_token
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
_UpperCAmelCase = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_UpperCAmelCase = False
_UpperCAmelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(UpperCamelCase__ ):
_UpperCAmelCase = tokenizer(example["src"] , truncation=UpperCamelCase__ , max_length=1024 )
_UpperCAmelCase = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_UpperCAmelCase = train_test_validation.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=train_test_validation["train"].column_names , )
_UpperCAmelCase = DataCollatorWithPadding(tokenizer=UpperCamelCase__ )
_UpperCAmelCase = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , )
_UpperCAmelCase = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
print("Training..." )
trainer.add_callback(CustomCallback(UpperCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 657 |
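The CustomCallback above exists to log training-set metrics alongside validation metrics: whenever an evaluation is scheduled, it re-runs evaluate() on the train split under a "train" prefix. A cleaned-up restatement (the on_epoch_end hook name is an assumption consistent with evaluation_strategy="epoch" above):

# Cleaned-up restatement of the CustomCallback defined above.
from copy import deepcopy
from transformers import TrainerCallback

class TrainEvalCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)  # evaluate() mutates control
            self._trainer.evaluate(
                eval_dataset=self._trainer.train_dataset, metric_key_prefix="train"
            )
            return control_copy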
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def merge(UpperCamelCase__ , UpperCamelCase__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(UpperCamelCase__ ) <= 1:
return collection
_UpperCAmelCase = len(UpperCamelCase__ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 | 1 |
"""simple docstring"""
import os
def __lowerCamelCase ( UpperCamelCase__ = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(UpperCamelCase__ ) , UpperCamelCase__ ) ) as input_file:
_UpperCAmelCase = [
[int(UpperCamelCase__ ) for element in line.split("," )]
for line in input_file.readlines()
]
_UpperCAmelCase = len(UpperCamelCase__ )
_UpperCAmelCase = len(matrix[0] )
_UpperCAmelCase = [[-1 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
for i in range(UpperCamelCase__ ):
_UpperCAmelCase = matrix[i][0]
for j in range(1 , UpperCamelCase__ ):
for i in range(UpperCamelCase__ ):
_UpperCAmelCase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , UpperCamelCase__ ):
_UpperCAmelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_UpperCAmelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 657 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 | 1 |
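The first cell of the row just closed implements a Project Euler-style "path sum: three ways" dynamic program: enter anywhere in the first column, move right, up, or down, and exit anywhere in the last column. The recurrence, reconstructed without the file I/O wrapper:

def minimal_path_sum(matrix: list[list[int]]) -> int:
    """Minimal right/up/down path sum from the first to the last column."""
    rows, cols = len(matrix), len(matrix[0])
    sums = [[-1] * cols for _ in range(rows)]
    for i in range(rows):                      # first column: entry costs
        sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):                  # step in from the left
            sums[i][j] = sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):               # relax downward moves
            sums[i][j] = min(sums[i][j], sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):      # relax upward moves
            sums[i][j] = min(sums[i][j], sums[i + 1][j] + matrix[i][j])
    return min(row[-1] for row in sums)

assert minimal_path_sum([[131, 673], [201, 96]]) == 297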
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase ( UpperCamelCase__=None ):
"""simple docstring"""
if subparsers is not None:
_UpperCAmelCase = subparsers.add_parser("test" )
else:
_UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=UpperCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
_UpperCAmelCase = script_name
else:
_UpperCAmelCase = f"--config_file={args.config_file} {script_name}"
_UpperCAmelCase = ["accelerate-launch"] + test_args.split()
_UpperCAmelCase = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = test_command_parser()
_UpperCAmelCase = parser.parse_args()
test_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 657 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "schedule.bin" )
torch.save(scheduler.state_dict() , UpperCamelCase__ )
_UpperCAmelCase = torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Optional[int]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> str:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase_ : Dict = 10
def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(a_ , self.num_steps )
self.assertListAlmostEqual(
a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps )
self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" )
class _lowerCAmelCase :
def __init__( self , a_ ) -> Union[str, Any]:
_UpperCAmelCase = fn
def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
return self.fn(*a_ , **a_ )
@classmethod
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 657 | 1 |
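The optimization-test cell above records the learning rate before every scheduler step (its unwrap_schedule helper). A standalone sketch of the same pattern, using the imports the cell itself uses; the helper name and the print are mine:

from torch import nn
from transformers import AdamW, get_linear_schedule_with_warmup

def record_lrs(scheduler, num_steps=10):
    # Capture the LR in effect at each step, then advance the schedule.
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs

model = nn.Linear(50, 50)
optimizer = AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=2, num_training_steps=10)
print(record_lrs(scheduler))
# expected, per the test's own table: [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]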
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
_UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("RGB" )
_UpperCAmelCase = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ),
] )
_UpperCAmelCase = transform(UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
return image
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if "visual_encoder" in key:
_UpperCAmelCase = re.sub("visual_encoder*" , "vision_model.encoder" , UpperCamelCase__ )
if "blocks" in key:
_UpperCAmelCase = re.sub(r"blocks" , "layers" , UpperCamelCase__ )
if "attn" in key:
_UpperCAmelCase = re.sub(r"attn" , "self_attn" , UpperCamelCase__ )
if "norm1" in key:
_UpperCAmelCase = re.sub(r"norm1" , "layer_norm1" , UpperCamelCase__ )
if "norm2" in key:
_UpperCAmelCase = re.sub(r"norm2" , "layer_norm2" , UpperCamelCase__ )
if "encoder.norm" in key:
_UpperCAmelCase = re.sub(r"encoder.norm" , "post_layernorm" , UpperCamelCase__ )
if "encoder.patch_embed.proj" in key:
_UpperCAmelCase = re.sub(r"encoder.patch_embed.proj" , "embeddings.patch_embedding" , UpperCamelCase__ )
if "encoder.pos_embed" in key:
_UpperCAmelCase = re.sub(r"encoder.pos_embed" , "embeddings.position_embedding" , UpperCamelCase__ )
if "encoder.cls_token" in key:
_UpperCAmelCase = re.sub(r"encoder.cls_token" , "embeddings.class_embedding" , UpperCamelCase__ )
if "self_attn" in key:
_UpperCAmelCase = re.sub(r"self_attn.proj" , "self_attn.projection" , UpperCamelCase__ )
return key
@torch.no_grad()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=None ):
"""simple docstring"""
if config_path is not None:
_UpperCAmelCase = BlipConfig.from_pretrained(UpperCamelCase__ )
else:
_UpperCAmelCase = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
_UpperCAmelCase = BlipForConditionalGeneration(UpperCamelCase__ ).eval()
_UpperCAmelCase = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
_UpperCAmelCase = blip_decoder(pretrained=UpperCamelCase__ , image_size=384 , vit="base" )
_UpperCAmelCase = pt_model.eval()
_UpperCAmelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(UpperCamelCase__ )
_UpperCAmelCase = rename_key(UpperCamelCase__ )
_UpperCAmelCase = value
hf_model.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase = 384
_UpperCAmelCase = load_demo_image(image_size=UpperCamelCase__ , device="cpu" )
_UpperCAmelCase = BertTokenizer.from_pretrained("bert-base-uncased" )
_UpperCAmelCase = tokenizer(["a picture of"] ).input_ids
_UpperCAmelCase = hf_model.generate(UpperCamelCase__ , UpperCamelCase__ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
_UpperCAmelCase = hf_model.generate(UpperCamelCase__ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(UpperCamelCase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_UpperCAmelCase = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
_UpperCAmelCase = blip_vqa(pretrained=UpperCamelCase__ , image_size=UpperCamelCase__ , vit="base" )
vqa_model.eval()
_UpperCAmelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(UpperCamelCase__ )
_UpperCAmelCase = rename_key(UpperCamelCase__ )
_UpperCAmelCase = value
_UpperCAmelCase = BlipForQuestionAnswering(UpperCamelCase__ )
hf_vqa_model.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase = ["How many dogs are in this image?"]
_UpperCAmelCase = tokenizer(UpperCamelCase__ , return_tensors="pt" ).input_ids
_UpperCAmelCase = hf_vqa_model.generate(UpperCamelCase__ , UpperCamelCase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
_UpperCAmelCase = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
_UpperCAmelCase = blip_itm(pretrained=UpperCamelCase__ , image_size=UpperCamelCase__ , vit="base" )
itm_model.eval()
_UpperCAmelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(UpperCamelCase__ )
_UpperCAmelCase = rename_key(UpperCamelCase__ )
_UpperCAmelCase = value
_UpperCAmelCase = BlipForImageTextRetrieval(UpperCamelCase__ )
_UpperCAmelCase = ["A picture of a woman with a dog sitting in a beach"]
_UpperCAmelCase = tokenizer(
UpperCamelCase__ , return_tensors="pt" , padding="max_length" , truncation=UpperCamelCase__ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(UpperCamelCase__ )
hf_itm_model.eval()
_UpperCAmelCase = hf_itm_model(UpperCamelCase__ , UpperCamelCase__ , use_itm_head=UpperCamelCase__ )
_UpperCAmelCase = hf_itm_model(UpperCamelCase__ , UpperCamelCase__ , use_itm_head=UpperCamelCase__ )
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
__magic_name__ = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 657 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase ( UpperCamelCase__=None ):
"""simple docstring"""
if subparsers is not None:
_UpperCAmelCase = subparsers.add_parser("test" )
else:
_UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=UpperCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
_UpperCAmelCase = script_name
else:
_UpperCAmelCase = f"--config_file={args.config_file} {script_name}"
_UpperCAmelCase = ["accelerate-launch"] + test_args.split()
_UpperCAmelCase = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = test_command_parser()
_UpperCAmelCase = parser.parse_args()
test_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 657 | 1 |
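The BLIP conversion cell in this row converts a checkpoint by popping each state-dict key, regex-renaming it, and reinserting the tensor. The pattern in isolation; the two rename rules shown are illustrative, adapted from the cell's rename_key:

import re

RENAMES = [  # (pattern, replacement) pairs; illustrative subset only
    (r"^visual_encoder", "vision_model.encoder"),
    (r"\.blocks\.", ".layers."),
]

def rename_key(key: str) -> str:
    for pattern, repl in RENAMES:
        key = re.sub(pattern, repl, key)
    return key

def convert_state_dict(state_dict: dict) -> dict:
    # Pop each entry and reinsert it under the new naming scheme.
    converted = {}
    for key in list(state_dict):
        converted[rename_key(key)] = state_dict.pop(key)
    return converted

assert rename_key("visual_encoder.blocks.0.attn") == "vision_model.encoder.layers.0.attn"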
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> None:
warnings.warn(
"The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ImageGPTImageProcessor instead." , a_ , )
super().__init__(*a_ , **a_ )
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 10 - x * x
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) >= 0:
raise ValueError("Wrong space!" )
_UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
_UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(UpperCamelCase__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) < 0:
_UpperCAmelCase = c
else:
_UpperCAmelCase = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 657 | 1 |
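The bisection cell above lost its branch targets to the obfuscation pass (both interval endpoints are assigned through the same placeholder). A reconstruction under the standard assumption that the interval shrinks toward the sign change:

def equation(x: float) -> float:
    return 10 - x * x

def bisection(a: float, b: float) -> float:
    """Find a root of `equation` in [a, b]; requires a sign change."""
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        c = (a + b) / 2                     # midpoint
        if equation(c) == 0.0:              # exact root found
            break
        if equation(c) * equation(a) < 0:   # root lies in [a, c]
            b = c
        else:                               # root lies in [c, b]
            a = c
    return c

print(bisection(-2, 5))   # ~3.16, i.e. sqrt(10)
print(bisection(0, 6))    # ~3.16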
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
def __init__( self , a_ , a_=7 , a_=3 , a_=30 , a_=400 , a_=True , a_=None , a_=True , a_=[0.5, 0.5, 0.5] , a_=[0.5, 0.5, 0.5] , a_=True , a_=1 / 255 , a_=True , ) -> Optional[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_UpperCAmelCase = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_pad
def _a ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _a ( self , a_ , a_=False ) -> Any:
if not batched:
_UpperCAmelCase = image_inputs[0]
if isinstance(a_ , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase = image.size
else:
_UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase = int(self.size["shortest_edge"] * h / w )
_UpperCAmelCase = self.size["shortest_edge"]
elif w > h:
_UpperCAmelCase = self.size["shortest_edge"]
_UpperCAmelCase = int(self.size["shortest_edge"] * w / h )
else:
_UpperCAmelCase = self.size["shortest_edge"]
_UpperCAmelCase = self.size["shortest_edge"]
else:
_UpperCAmelCase = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase = max(a_ , key=lambda a_ : item[0] )[0]
_UpperCAmelCase = max(a_ , key=lambda a_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = ConditionalDetrImageProcessor if is_vision_available() else None
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ConditionalDetrImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> str:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "image_mean" ) )
self.assertTrue(hasattr(a_ , "image_std" ) )
self.assertTrue(hasattr(a_ , "do_normalize" ) )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "size" ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , a_ )
_UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=a_ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , a_ )
def _a ( self ) -> Optional[Any]:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ , batched=a_ )
_UpperCAmelCase = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(a_ , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ , batched=a_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> Any:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(a_ , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ , batched=a_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _a ( self ) -> List[str]:
# prepare image and target
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {"image_id": 39769, "annotations": target}
# encode them
_UpperCAmelCase = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
_UpperCAmelCase = image_processing(images=a_ , annotations=a_ , return_tensors="pt" )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , a_ )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a_ , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a_ ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , a_ )
_UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a_ , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a_ ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a_ ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a_ ) )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a_ ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a_ ) )
@slow
def _a ( self ) -> Optional[int]:
# prepare image, target and masks_path
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
_UpperCAmelCase = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
_UpperCAmelCase = ConditionalDetrImageProcessor(format="coco_panoptic" )
_UpperCAmelCase = image_processing(images=a_ , annotations=a_ , masks_path=a_ , return_tensors="pt" )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , a_ )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a_ , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a_ ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , a_ )
_UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a_ , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a_ ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a_ ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a_ ) )
# verify masks
_UpperCAmelCase = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , a_ )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a_ ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a_ ) )
| 657 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPTaLMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _a ( self , a_ , a_ ) -> torch.Tensor:
return torch.zeros(a_ , self.prefix_length , dtype=torch.intaa , device=a_ )
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 657 | 1 |
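The image-processor test at the top of this row computes expected output sizes for shortest-edge resizing by hand. That arithmetic, extracted into a small standalone helper (the function name and the default edge length are assumptions):

def expected_size(w: int, h: int, shortest_edge: int = 18) -> tuple[int, int]:
    """(height, width) after shortest-edge resizing: the shorter side
    becomes `shortest_edge`, the other side scales to keep aspect ratio."""
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert expected_size(30, 400) == (240, 18)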
"""simple docstring"""
from __future__ import annotations
import math
class _lowerCAmelCase :
def __init__( self , a_ ) -> None:
_UpperCAmelCase = size
# approximate the overall size of segment tree with given value
_UpperCAmelCase = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
_UpperCAmelCase = [0 for i in range(0 , 4 * size )]
_UpperCAmelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update
def _a ( self , a_ ) -> int:
return idx * 2
def _a ( self , a_ ) -> int:
return idx * 2 + 1
def _a ( self , a_ , a_ , a_ , a_ ) -> None:
if left_element == right_element:
_UpperCAmelCase = a[left_element - 1]
else:
_UpperCAmelCase = (left_element + right_element) // 2
self.build(self.left(a_ ) , a_ , a_ , a_ )
self.build(self.right(a_ ) , mid + 1 , a_ , a_ )
_UpperCAmelCase = max(
self.segment_tree[self.left(a_ )] , self.segment_tree[self.right(a_ )] )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> bool:
if self.flag[idx] is True:
_UpperCAmelCase = self.lazy[idx]
_UpperCAmelCase = False
if left_element != right_element:
_UpperCAmelCase = self.lazy[idx]
_UpperCAmelCase = self.lazy[idx]
_UpperCAmelCase = True
_UpperCAmelCase = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
_UpperCAmelCase = val
if left_element != right_element:
_UpperCAmelCase = val
_UpperCAmelCase = val
_UpperCAmelCase = True
_UpperCAmelCase = True
return True
_UpperCAmelCase = (left_element + right_element) // 2
self.update(self.left(a_ ) , a_ , a_ , a_ , a_ , a_ )
self.update(self.right(a_ ) , mid + 1 , a_ , a_ , a_ , a_ )
_UpperCAmelCase = max(
self.segment_tree[self.left(a_ )] , self.segment_tree[self.right(a_ )] )
return True
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> int | float:
if self.flag[idx] is True:
_UpperCAmelCase = self.lazy[idx]
_UpperCAmelCase = False
if left_element != right_element:
_UpperCAmelCase = self.lazy[idx]
_UpperCAmelCase = self.lazy[idx]
_UpperCAmelCase = True
_UpperCAmelCase = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
_UpperCAmelCase = (left_element + right_element) // 2
_UpperCAmelCase = self.query(self.left(a_ ) , a_ , a_ , a_ , a_ )
_UpperCAmelCase = self.query(self.right(a_ ) , mid + 1 , a_ , a_ , a_ )
return max(a_ , a_ )
def __str__( self ) -> str:
return str([self.query(1 , 1 , self.size , a_ , a_ ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
__magic_name__ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
__magic_name__ = 15
__magic_name__ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt)
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
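The first cell of this row is a max segment tree with lazy range assignment, heavily obfuscated. As a readable point of comparison, here is a minimal range-max segment tree without the lazy-update machinery, queried against the same demo array:

import math

class MaxSegmentTree:
    def __init__(self, data):
        self.n = len(data)
        self.tree = [-math.inf] * (4 * self.n)  # standard 4n allocation
        self._build(1, 0, self.n - 1, data)

    def _build(self, idx, lo, hi, data):
        if lo == hi:
            self.tree[idx] = data[lo]
            return
        mid = (lo + hi) // 2
        self._build(2 * idx, lo, mid, data)
        self._build(2 * idx + 1, mid + 1, hi, data)
        self.tree[idx] = max(self.tree[2 * idx], self.tree[2 * idx + 1])

    def query(self, idx, lo, hi, a, b):
        """Max over data[a..b] (inclusive, 0-based)."""
        if b < lo or hi < a:                 # disjoint range
            return -math.inf
        if a <= lo and hi <= b:              # fully covered node
            return self.tree[idx]
        mid = (lo + hi) // 2
        return max(self.query(2 * idx, lo, mid, a, b),
                   self.query(2 * idx + 1, mid + 1, hi, a, b))

tree = MaxSegmentTree([1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8])
print(tree.query(1, 0, tree.n - 1, 3, 5))   # max of 7, 3, -5 -> 7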
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ) , end="\t" )
else:
print("INF" , end="\t" )
print()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = [[float("inf" ) for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
_UpperCAmelCase = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(UpperCamelCase__ ):
# looping through rows of graph array
for i in range(UpperCamelCase__ ):
# looping through columns of graph array
for j in range(UpperCamelCase__ ):
if (
dist[i][k] != float("inf" )
and dist[k][j] != float("inf" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
_UpperCAmelCase = dist[i][k] + dist[k][j]
_print_dist(UpperCamelCase__ , UpperCamelCase__ )
return dist, v
if __name__ == "__main__":
__magic_name__ = int(input('''Enter number of vertices: '''))
__magic_name__ = int(input('''Enter number of edges: '''))
__magic_name__ = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
__magic_name__ = 0.0
# src and dst are vertex indices and must be within range(v)
# indices outside this range will raise an IndexError
for i in range(e):
print('''\nEdge ''', i + 1)
__magic_name__ = int(input('''Enter source:'''))
__magic_name__ = int(input('''Enter destination:'''))
__magic_name__ = float(input('''Enter weight:'''))
__magic_name__ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 657 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 657 | 1 |
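The Floyd-Warshall cell in this row mixes the algorithm with interactive input handling. The core triple loop on its own, reconstructed; Python's inf arithmetic makes the cell's explicit infinity guards unnecessary in this sketch:

def floyd_warshall(graph: list[list[float]]) -> list[list[float]]:
    """All-pairs shortest paths; graph[i][j] is the edge weight or inf."""
    v = len(graph)
    dist = [row[:] for row in graph]          # copy the adjacency matrix
    for k in range(v):                        # allowed intermediate vertex
        for i in range(v):
            for j in range(v):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return dist

INF = float("inf")
g = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
print(floyd_warshall(g))  # matches the expected output in the cell's comments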
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase ):
lowercase_ : Optional[int] = 1
@register_to_config
def __init__( self , a_=2000 , a_=0.1 , a_=20 , a_=1e-3 ) -> Tuple:
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
def _a ( self , a_ , a_ = None ) -> int:
_UpperCAmelCase = torch.linspace(1 , self.config.sampling_eps , a_ , device=a_ )
def _a ( self , a_ , a_ , a_ , a_=None ) -> int:
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_UpperCAmelCase = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_UpperCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
_UpperCAmelCase = std.flatten()
while len(std.shape ) < len(score.shape ):
_UpperCAmelCase = std.unsqueeze(-1 )
_UpperCAmelCase = -score / std
# compute
_UpperCAmelCase = -1.0 / len(self.timesteps )
_UpperCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_UpperCAmelCase = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
_UpperCAmelCase = beta_t.unsqueeze(-1 )
_UpperCAmelCase = -0.5 * beta_t * x
_UpperCAmelCase = torch.sqrt(a_ )
_UpperCAmelCase = drift - diffusion**2 * score
_UpperCAmelCase = x + drift * dt
# add noise
_UpperCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=a_ , device=x.device , dtype=x.dtype )
_UpperCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ) -> List[str]:
return self.config.num_train_timesteps
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if (len(UpperCamelCase__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
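The base16 cell above encodes bytes as uppercase hex and validates on decode, though the obfuscation broke the encoder's loop variable. A corrected, self-contained reconstruction:

def base16_encode(data: bytes) -> str:
    """Uppercase hex per RFC 3548 section 6."""
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)

def base16_decode(data: str) -> bytes:
    if len(data) % 2 != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # Every two hex digits form one byte.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))

assert base16_encode(b"Hello") == "48656C6C6F"
assert base16_decode("48656C6C6F") == b"Hello"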
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
__magic_name__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
for attribute in key.split("." ):
_UpperCAmelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
_UpperCAmelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
_UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
_UpperCAmelCase = value
elif weight_type == "weight_g":
_UpperCAmelCase = value
elif weight_type == "weight_v":
_UpperCAmelCase = value
elif weight_type == "bias":
_UpperCAmelCase = value
else:
_UpperCAmelCase = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = fairseq_model.state_dict()
_UpperCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
_UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == "group" , )
_UpperCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
_UpperCAmelCase = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
_UpperCAmelCase = True
if "*" in mapped_key:
_UpperCAmelCase = name.split(UpperCamelCase__ )[0].split("." )[-2]
_UpperCAmelCase = mapped_key.replace("*" , UpperCamelCase__ )
if "weight_g" in name:
_UpperCAmelCase = "weight_g"
elif "weight_v" in name:
_UpperCAmelCase = "weight_v"
elif "bias" in name:
_UpperCAmelCase = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_UpperCAmelCase = "weight"
else:
_UpperCAmelCase = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(f"Unused weights: {unused_weights}" )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = full_name.split("conv_layers." )[-1]
_UpperCAmelCase = name.split("." )
_UpperCAmelCase = int(items[0] )
_UpperCAmelCase = int(items[1] )
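    # type_id 0 addresses the convolution itself; type_id 2 addresses its layer/group norm.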
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_UpperCAmelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_UpperCAmelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
_UpperCAmelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
_UpperCAmelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True ):
"""simple docstring"""
if config_path is not None:
_UpperCAmelCase = UniSpeechSatConfig.from_pretrained(UpperCamelCase__ )
else:
_UpperCAmelCase = UniSpeechSatConfig()
_UpperCAmelCase = ""
if is_finetuned:
_UpperCAmelCase = UniSpeechSatForCTC(UpperCamelCase__ )
else:
_UpperCAmelCase = UniSpeechSatForPreTraining(UpperCamelCase__ )
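    # fairseq rebuilds the task from the dictionary's directory, so it is passed via the "data" override.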
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
_UpperCAmelCase = model[0].eval()
recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ )
hf_wavavec.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''If set, the checkpoint to convert is a pretrained (not fine-tuned) model'''
)
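    # Illustrative invocation (the script and file names below are hypothetical):
    #   python convert_unispeech_sat_checkpoint.py --checkpoint_path ./unispeech_sat.pt \
    #       --pytorch_dump_folder_path ./hf_unispeech_sat --dict_path ./dict.ltr.txt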
__magic_name__ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
try:
_UpperCAmelCase = float(UpperCamelCase__ )
except ValueError:
raise ValueError("Please enter a valid number" )
_UpperCAmelCase = decimal - int(UpperCamelCase__ )
if fractional_part == 0:
return int(UpperCamelCase__ ), 1
else:
_UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
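        # Reduce the fraction with the Euclidean algorithm: the loop below computes
        # gcd(numerator, denominator), by which both terms are finally divided.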
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
        _UpperCAmelCase , _UpperCAmelCase = numerator // divisor, denominator // divisor
return int(UpperCamelCase__ ), int(UpperCamelCase__ )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if (len(UpperCamelCase__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return " ".join(
"".join(word[::-1] ) if len(UpperCamelCase__ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 657 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
| 657 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Optional[Any] = DDIMPipeline
lowercase_ : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase_ : Tuple = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''latents''',
'''callback''',
'''callback_steps''',
}
lowercase_ : Dict = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ : Tuple = False
def _a ( self ) -> List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {"unet": unet, "scheduler": scheduler}
return components
def _a ( self , a_ , a_=0 ) -> Any:
if str(a_ ).startswith("mps" ):
_UpperCAmelCase = torch.manual_seed(a_ )
else:
_UpperCAmelCase = torch.Generator(device=a_ ).manual_seed(a_ )
_UpperCAmelCase = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = "cpu"
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase = self.get_dummy_inputs(a_ )
_UpperCAmelCase = pipe(**a_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
_UpperCAmelCase = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1e-3 )
def _a ( self ) -> Union[str, Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _a ( self ) -> int:
super().test_save_load_local(expected_max_difference=3e-3 )
def _a ( self ) -> List[Any]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def _a ( self ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "google/ddpm-cifar10-32"
_UpperCAmelCase = UNetaDModel.from_pretrained(a_ )
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = DDIMPipeline(unet=a_ , scheduler=a_ )
ddim.to(a_ )
ddim.set_progress_bar_config(disable=a_ )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ddim(generator=a_ , eta=0.0 , output_type="numpy" ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self ) -> Dict:
_UpperCAmelCase = "google/ddpm-ema-bedroom-256"
_UpperCAmelCase = UNetaDModel.from_pretrained(a_ )
_UpperCAmelCase = DDIMScheduler.from_pretrained(a_ )
_UpperCAmelCase = DDIMPipeline(unet=a_ , scheduler=a_ )
ddpm.to(a_ )
ddpm.set_progress_bar_config(disable=a_ )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ddpm(generator=a_ , output_type="numpy" ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCAmelCase = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 657 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
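        # Serialize the nested backbone config as a plain dict so the full
        # configuration round-trips through to_dict()/from_dict().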
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = generate_pascal_triangle(UpperCamelCase__ )
for row_idx in range(UpperCamelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
_UpperCAmelCase = []
for current_row_idx in range(UpperCamelCase__ ):
_UpperCAmelCase = populate_current_row(UpperCamelCase__ , UpperCamelCase__ )
triangle.append(UpperCamelCase__ )
return triangle
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
_UpperCAmelCase , _UpperCAmelCase = 1, 1
for current_col_idx in range(1 , UpperCamelCase__ ):
calculate_current_element(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return current_row
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
"""simple docstring"""
_UpperCAmelCase = triangle[current_row_idx - 1][current_col_idx - 1]
_UpperCAmelCase = triangle[current_row_idx - 1][current_col_idx]
_UpperCAmelCase = above_to_left_elt + above_to_right_elt
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
_UpperCAmelCase = [[1]]
for row_index in range(1 , UpperCamelCase__ ):
_UpperCAmelCase = [0] + result[-1] + [0]
_UpperCAmelCase = row_index + 1
# Calculate the number of distinct elements in a row
_UpperCAmelCase = sum(divmod(UpperCamelCase__ , 2 ) )
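        # Each row of Pascal's triangle is palindromic, so only the first half is computed
        # below and then mirrored to obtain the full row.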
_UpperCAmelCase = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
_UpperCAmelCase = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
_UpperCAmelCase = row_first_half + row_second_half
result.append(UpperCamelCase__ )
return result
def __lowerCamelCase ( ):
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase__ , UpperCamelCase__ ) -> None:
_UpperCAmelCase = f"{func.__name__}({value})"
_UpperCAmelCase = timeit(f"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(UpperCamelCase__ , UpperCamelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 | 1 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__magic_name__ = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
__magic_name__ = get_tests_dir('''fixtures/vocab.json''')
__magic_name__ = get_tests_dir('''fixtures''')
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def _a ( self ) -> Any:
_UpperCAmelCase = 0
def _a ( self ) -> Dict:
_UpperCAmelCase = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaConfig()
_UpperCAmelCase = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a_ )
processor.save_pretrained(a_ )
_UpperCAmelCase = AutoProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a_ , os.path.join(a_ , a_ ) )
copyfile(a_ , os.path.join(a_ , "vocab.json" ) )
_UpperCAmelCase = AutoProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaFeatureExtractor()
_UpperCAmelCase = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
_UpperCAmelCase = WavaVecaProcessor(a_ , a_ )
# save in new folder
processor.save_pretrained(a_ )
# drop `processor_class` in tokenizer
with open(os.path.join(a_ , a_ ) , "r" ) as f:
_UpperCAmelCase = json.load(a_ )
config_dict.pop("processor_class" )
with open(os.path.join(a_ , a_ ) , "w" ) as f:
f.write(json.dumps(a_ ) )
_UpperCAmelCase = AutoProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaFeatureExtractor()
_UpperCAmelCase = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
_UpperCAmelCase = WavaVecaProcessor(a_ , a_ )
# save in new folder
processor.save_pretrained(a_ )
# drop `processor_class` in feature extractor
with open(os.path.join(a_ , a_ ) , "r" ) as f:
_UpperCAmelCase = json.load(a_ )
config_dict.pop("processor_class" )
with open(os.path.join(a_ , a_ ) , "w" ) as f:
f.write(json.dumps(a_ ) )
_UpperCAmelCase = AutoProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a_ )
# copy relevant files
copyfile(a_ , os.path.join(a_ , "vocab.json" ) )
            # create empty sample processor
with open(os.path.join(a_ , a_ ) , "w" ) as f:
f.write("{}" )
_UpperCAmelCase = AutoProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a_ ):
_UpperCAmelCase = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a_ ):
_UpperCAmelCase = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a_ )
_UpperCAmelCase = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
_UpperCAmelCase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
_UpperCAmelCase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
_UpperCAmelCase = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a_ , use_fast=a_ )
_UpperCAmelCase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def _a ( self ) -> int:
try:
AutoConfig.register("custom" , a_ )
AutoFeatureExtractor.register(a_ , a_ )
AutoTokenizer.register(a_ , slow_tokenizer_class=a_ )
AutoProcessor.register(a_ , a_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a_ ):
AutoProcessor.register(a_ , a_ )
# Now that the config is registered, it can be used as any other config with the auto-API
_UpperCAmelCase = CustomFeatureExtractor.from_pretrained(a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(a_ , "vocab.txt" )
with open(a_ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase = CustomTokenizer(a_ )
_UpperCAmelCase = CustomProcessor(a_ , a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a_ )
_UpperCAmelCase = AutoProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _a ( self ) -> int:
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = False
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Tuple = False
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Tuple = '''AutoFeatureExtractor'''
lowercase_ : Dict = '''AutoTokenizer'''
lowercase_ : str = False
try:
AutoConfig.register("custom" , a_ )
AutoFeatureExtractor.register(a_ , a_ )
AutoTokenizer.register(a_ , slow_tokenizer_class=a_ )
AutoProcessor.register(a_ , a_ )
# If remote code is not set, the default is to use local classes.
_UpperCAmelCase = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
_UpperCAmelCase = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a_ )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
_UpperCAmelCase = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a_ )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _a ( self ) -> int:
_UpperCAmelCase = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def _a ( self ) -> Any:
_UpperCAmelCase = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def _a ( cls ) -> Union[str, Any]:
_UpperCAmelCase = TOKEN
HfFolder.save_token(a_ )
@classmethod
def _a ( cls ) -> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def _a ( self ) -> int:
_UpperCAmelCase = WavaVecaProcessor.from_pretrained(a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a_ , "test-processor" ) , push_to_hub=a_ , use_auth_token=self._token )
_UpperCAmelCase = WavaVecaProcessor.from_pretrained(f"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a_ , getattr(new_processor.feature_extractor , a_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _a ( self ) -> Tuple:
_UpperCAmelCase = WavaVecaProcessor.from_pretrained(a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a_ , "test-processor-org" ) , push_to_hub=a_ , use_auth_token=self._token , organization="valid_org" , )
_UpperCAmelCase = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a_ , getattr(new_processor.feature_extractor , a_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _a ( self ) -> str:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
_UpperCAmelCase = CustomFeatureExtractor.from_pretrained(a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(a_ , "vocab.txt" )
with open(a_ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase = CustomTokenizer(a_ )
_UpperCAmelCase = CustomProcessor(a_ , a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"{USER}/test-dynamic-processor" , token=self._token )
_UpperCAmelCase = Repository(a_ , clone_from=f"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(a_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a_ , "tokenizer_config.json" ) ) as f:
_UpperCAmelCase = json.load(a_ )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a_ , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a_ , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a_ , "custom_processing.py" ) ) )
repo.push_to_hub()
_UpperCAmelCase = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor" , trust_remote_code=a_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 657 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
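        # (swap coordinates where needed so that x0 <= x1 and y0 <= y1)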
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
| 657 | 1 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = 0
def _a ( self ) -> List[str]:
_UpperCAmelCase = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = Path(a_ ) / "preprocessor_config.json"
_UpperCAmelCase = Path(a_ ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(a_ , "w" ) , )
json.dump({"model_type": "clip"} , open(a_ , "w" ) )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> List[str]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = Path(a_ ) / "preprocessor_config.json"
_UpperCAmelCase = Path(a_ ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(a_ , "w" ) , )
json.dump({"model_type": "clip"} , open(a_ , "w" ) )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = CLIPConfig()
            # Create a dummy config file with image_processor_type
_UpperCAmelCase = Path(a_ ) / "preprocessor_config.json"
_UpperCAmelCase = Path(a_ ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(a_ , "w" ) , )
json.dump({"model_type": "clip"} , open(a_ , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_UpperCAmelCase = AutoImageProcessor.from_pretrained(a_ ).to_dict()
config_dict.pop("image_processor_type" )
_UpperCAmelCase = CLIPImageProcessor(**a_ )
# save in new folder
model_config.save_pretrained(a_ )
config.save_pretrained(a_ )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(a_ )
# make sure private variable is not incorrectly saved
_UpperCAmelCase = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = Path(a_ ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(a_ , "w" ) , )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def _a ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
a_ , "clip-base is not a local folder and is not a valid model identifier" ):
_UpperCAmelCase = AutoImageProcessor.from_pretrained("clip-base" )
def _a ( self ) -> str:
with self.assertRaisesRegex(
a_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_UpperCAmelCase = AutoImageProcessor.from_pretrained(a_ , revision="aaaaaa" )
def _a ( self ) -> Any:
with self.assertRaisesRegex(
a_ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _a ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a_ ):
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a_ ):
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=a_ )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=a_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a_ )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(a_ , trust_remote_code=a_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _a ( self ) -> List[str]:
try:
AutoConfig.register("custom" , a_ )
AutoImageProcessor.register(a_ , a_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a_ ):
AutoImageProcessor.register(a_ , a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = Path(a_ ) / "preprocessor_config.json"
_UpperCAmelCase = Path(a_ ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(a_ , "w" ) , )
json.dump({"model_type": "clip"} , open(a_ , "w" ) )
_UpperCAmelCase = CustomImageProcessor.from_pretrained(a_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a_ )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _a ( self ) -> int:
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : int = True
try:
AutoConfig.register("custom" , a_ )
AutoImageProcessor.register(a_ , a_ )
# If remote code is not set, the default is to use local
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=a_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=a_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(a_ , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 657 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
        # We don't actually care about the result; we just want to make sure
        # the pipeline runs, i.e. that the float16 tensor was cast back to float32
        # for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
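# Factory for the generic pipeline test suite: builds a FillMaskPipeline plus example inputs, skipping tokenizers without a mask token.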
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# Inputs that contain no mask token are not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
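# Use the two lexicographically first vocabulary tokens as deterministic prediction targets.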
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
# If we use the most probable targets, and filter differently, we should still
# have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
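# Five targets are requested, but they cover only three unique tokens.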
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
# The target list contains duplicates, so the pipeline cannot return more
# predictions than there are unique targets
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
| 657 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
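# Game-tree minimax over a complete binary tree of leaf scores; the tree height is log2(len(scores)).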
def minimax( depth , node_index , is_max , scores , height ):
"""simple docstring"""
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(scores ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
return min(
minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main( ):
"""simple docstring"""
scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
height = math.log(len(scores ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 657 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.int64() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.int64() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.int32() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.int32() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=Array2D((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , Array2DExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=Array2D((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , Array2DExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uint8 ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , pa.Buffer ) else pa.memory_map(UpperCamelCase__ )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
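# A non-hashable key (here a list) must be rejected when duplicate checking is enabled.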
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
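# Writing two examples under the same key must raise DuplicatedKeysError.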
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if pa.types.is_list(UpperCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if isinstance(lst[0] , UpperCamelCase__ ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase__ )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
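# Overflowing the optimized dtype should force the writer to fall back to int64.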
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.int64()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uint8 ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 657 | 1 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__magic_name__ = logging.get_logger(__name__)
def __lowerCamelCase ( UpperCamelCase__=None , UpperCamelCase__=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=UpperCamelCase__ )
@dataclass
class _lowerCAmelCase :
lowercase_ : List[str] = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
lowercase_ : List[int] = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
lowercase_ : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
lowercase_ : bool = field(default=lowerCamelCase , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
lowercase_ : bool = field(default=lowerCamelCase , metadata={'''help''': '''Benchmark training of model'''} )
lowercase_ : bool = field(default=lowerCamelCase , metadata={'''help''': '''Verbose memory tracing'''} )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
lowercase_ : bool = field(default=lowerCamelCase , metadata={'''help''': '''Trace memory line by line'''} )
lowercase_ : bool = field(default=lowerCamelCase , metadata={'''help''': '''Save result to a CSV file'''} )
lowercase_ : bool = field(default=lowerCamelCase , metadata={'''help''': '''Save all print statements in a log file'''} )
lowercase_ : bool = field(default=lowerCamelCase , metadata={'''help''': '''Whether to print environment information'''} )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
lowercase_ : str = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
lowercase_ : str = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
lowercase_ : str = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
lowercase_ : str = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
lowercase_ : str = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
lowercase_ : str = field(
default=f'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
lowercase_ : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def _a ( self ) -> List[str]:
warnings.warn(
f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models." , a_ , )
def _a ( self ) -> Any:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def _a ( self ) -> List[str]:
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = ['bert-base-cased']." )
return self.models
@property
def _a ( self ) -> int:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True
| 657 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _a ( self ) -> Optional[int]:
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _a ( self ) -> int:
_UpperCAmelCase = BackboneMixin()
_UpperCAmelCase = ["a", "b", "c"]
_UpperCAmelCase = ["a", "c"]
_UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( number ):
"""simple docstring"""
if not isinstance(number , int ):
msg = f"Input value of [number={number}] must be an integer"
raise TypeError(msg )
if number < 0:
return False
number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
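# Each optional backend below (tokenizers, torch, tf, flax) extends the import structure only when it is installed.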
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
__magic_name__ = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
__magic_name__ = {'''bert_for_seq_generation''': 5_12}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Dict = VOCAB_FILES_NAMES
lowercase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : List[int] = []
lowercase_ : str = ['''input_ids''', '''attention_mask''']
def __init__( self , a_ , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<pad>" , a_="<::::>" , a_ = None , **a_ , ) -> None:
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=a_ , eos_token=a_ , unk_token=a_ , pad_token=a_ , sep_token=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
def _a ( self ) -> Any:
return self.sp_model.get_piece_size()
def _a ( self ) -> Dict:
_UpperCAmelCase = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
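# Drop the unpicklable SentencePiece processor from the pickled state; __setstate__ reloads it from the vocab file.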
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , a_ ) -> Dict:
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , a_ ) -> List[str]:
return self.sp_model.encode(a_ , out_type=a_ )
def _a ( self , a_ ) -> str:
return self.sp_model.piece_to_id(a_ )
def _a ( self , a_ ) -> List[str]:
_UpperCAmelCase = self.sp_model.IdToPiece(a_ )
return token
def _a ( self , a_ ) -> Optional[int]:
_UpperCAmelCase = []
_UpperCAmelCase = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a_ ) + token
_UpperCAmelCase = []
else:
current_sub_tokens.append(a_ )
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def _a ( self , a_ , a_ = None ) -> Tuple[str]:
if not os.path.isdir(a_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_UpperCAmelCase = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
| 657 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
def _a ( self ) -> Any:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
_UpperCAmelCase = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 |
"""simple docstring"""
def __lowerCamelCase ( number ):
"""simple docstring"""
if not isinstance(number , int ):
msg = f"Input value of [number={number}] must be an integer"
raise TypeError(msg )
if number < 0:
return False
number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
T5Config,
T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
_UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("RGB" )
return image
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = dct.pop(UpperCamelCase__ )
_UpperCAmelCase = val
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_UpperCAmelCase = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" )
_UpperCAmelCase = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
_UpperCAmelCase = torch.cat((q_bias, torch.zeros_like(UpperCamelCase__ , requires_grad=UpperCamelCase__ ), v_bias) )
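# the original checkpoint stores no k bias, so zeros fill that slot in the concatenated qkv bias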
_UpperCAmelCase = qkv_bias
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = 364 if "coco" in model_name else 224
_UpperCAmelCase = InstructBlipVisionConfig(image_size=UpperCamelCase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
_UpperCAmelCase = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_UpperCAmelCase = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
_UpperCAmelCase = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=3_2001 ).to_dict()
elif "vicuna-13b" in model_name:
_UpperCAmelCase = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=3_2001 ).to_dict()
else:
raise ValueError("Model name not supported" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
_UpperCAmelCase = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
_UpperCAmelCase = InstructBlipConfig(vision_config=UpperCamelCase__ , text_config=UpperCamelCase__ , qformer_config=UpperCamelCase__ )
return config, image_size
@torch.no_grad()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=False ):
"""simple docstring"""
_UpperCAmelCase = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" )
qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
if "t5" in model_name:
_UpperCAmelCase = T5TokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
_UpperCAmelCase = LlamaTokenizerFast.from_pretrained(
"huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" )
tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
_UpperCAmelCase , _UpperCAmelCase = get_blip2_config(UpperCamelCase__ )
_UpperCAmelCase = InstructBlipForConditionalGeneration(UpperCamelCase__ ).eval()
_UpperCAmelCase = {
"instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
"instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
"instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
"instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
}
_UpperCAmelCase , _UpperCAmelCase = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
_UpperCAmelCase = "cuda:1" if torch.cuda.is_available() else "cpu"
_UpperCAmelCase = "cuda:2" if torch.cuda.is_available() else "cpu"
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = load_model_and_preprocess(
name=UpperCamelCase__ , model_type=UpperCamelCase__ , is_eval=UpperCamelCase__ , device=UpperCamelCase__ )
original_model.eval()
print("Done!" )
# update state dict keys
_UpperCAmelCase = original_model.state_dict()
_UpperCAmelCase = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith("Qformer.bert" ):
_UpperCAmelCase = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
_UpperCAmelCase = key.replace("self" , "attention" )
if "llm_proj" in key:
_UpperCAmelCase = key.replace("llm_proj" , "language_projection" )
if "t5_proj" in key:
_UpperCAmelCase = key.replace("t5_proj" , "language_projection" )
if key.startswith("llm_model" ):
_UpperCAmelCase = key.replace("llm_model" , "language_model" )
if key.startswith("t5" ):
_UpperCAmelCase = key.replace("t5" , "language" )
_UpperCAmelCase = val
# read in qv biases
read_in_q_v_bias(UpperCamelCase__ , UpperCamelCase__ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
_UpperCAmelCase = load_demo_image()
_UpperCAmelCase = "What is unusual about this image?"
# create processor
_UpperCAmelCase = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ )
_UpperCAmelCase = InstructBlipProcessor(
image_processor=UpperCamelCase__ , tokenizer=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ , )
_UpperCAmelCase = processor(images=UpperCamelCase__ , text=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# make sure processor creates exact same pixel values
_UpperCAmelCase = vis_processors["eval"](UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
_UpperCAmelCase = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
hf_model.to(UpperCamelCase__ )
with torch.no_grad():
if "vicuna" in model_name:
_UpperCAmelCase = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
_UpperCAmelCase = hf_model(**UpperCamelCase__ ).logits
else:
_UpperCAmelCase = original_model(
{"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
_UpperCAmelCase = tokenizer("\n" , return_tensors="pt" ).input_ids.to(UpperCamelCase__ )
_UpperCAmelCase = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
_UpperCAmelCase = hf_model(**UpperCamelCase__ , labels=UpperCamelCase__ ).logits
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
_UpperCAmelCase = 1E-4 if "vicuna" in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , UpperCamelCase__ , atol=UpperCamelCase__ )
print("Looks ok!" )
print("Generating with original model..." )
_UpperCAmelCase = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("Generating with HF model..." )
_UpperCAmelCase = hf_model.generate(
**UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
_UpperCAmelCase = 2
print("Original generation:" , UpperCamelCase__ )
_UpperCAmelCase = processor.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
_UpperCAmelCase = [text.strip() for text in output_text]
print("HF generation:" , UpperCamelCase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
processor.push_to_hub(f"Salesforce/{model_name}" )
hf_model.push_to_hub(f"Salesforce/{model_name}" )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
__magic_name__ = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
'''--model_name''' of the InstructBLIP checkpoint to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__magic_name__ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
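# --- illustrative sketch (not part of the conversion script above) ---
# The key-renaming loop above applies chained str.replace rules to checkpoint
# keys. A minimal standalone version of that idea; the demo keys and rules
# below are assumptions for illustration only:
from collections import OrderedDict

def rename_state_dict_keys(state_dict, rules):
    # rules: ordered (old_substring, new_substring) pairs applied in sequence
    renamed = OrderedDict()
    for key, value in state_dict.items():
        for old, new in rules:
            key = key.replace(old, new)
        renamed[key] = value
    return renamed

demo = {"Qformer.bert.layer.0.weight": 0, "llm_proj.bias": 1}
rules = [("Qformer.bert", "qformer"), ("llm_proj", "language_projection")]
assert rename_state_dict_keys(demo, rules) == {
    "qformer.layer.0.weight": 0,
    "language_projection.bias": 1,
}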
| 657 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(lowerCamelCase )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> Optional[int]:
super().__init__(*a_ , **a_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _a ( self , **a_ ) -> List[str]:
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*a_ , **a_ )
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = load_image(a_ )
_UpperCAmelCase = torch.IntTensor([[image.height, image.width]] )
_UpperCAmelCase = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
_UpperCAmelCase = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
_UpperCAmelCase = target_size
return inputs
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = model_inputs.pop("target_size" )
_UpperCAmelCase = self.model(**a_ )
_UpperCAmelCase = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
_UpperCAmelCase = model_inputs["bbox"]
return model_outputs
def _a ( self , a_ , a_=0.9 ) -> int:
_UpperCAmelCase = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_UpperCAmelCase , _UpperCAmelCase = target_size[0].tolist()
def unnormalize(a_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
_UpperCAmelCase , _UpperCAmelCase = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_UpperCAmelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_UpperCAmelCase = [unnormalize(a_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [dict(zip(a_ , a_ ) ) for vals in zip(scores.tolist() , a_ , a_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_UpperCAmelCase = self.image_processor.post_process_object_detection(a_ , a_ , a_ )
_UpperCAmelCase = raw_annotations[0]
_UpperCAmelCase = raw_annotation["scores"]
_UpperCAmelCase = raw_annotation["labels"]
_UpperCAmelCase = raw_annotation["boxes"]
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = [self.model.config.idalabel[label.item()] for label in labels]
_UpperCAmelCase = [self._get_bounding_box(a_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [
dict(zip(a_ , a_ ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def _a ( self , a_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
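# --- illustrative sketch (not part of the pipeline class above) ---
# LayoutLM-style word boxes live on a 0-1000 grid; the `unnormalize` closure
# above maps them back to pixel coordinates. The same arithmetic standalone:
def unnormalize_box(bbox, width, height):
    # bbox: (xmin, ymin, xmax, ymax) on the 0-1000 grid
    return {
        "xmin": int(width * bbox[0] / 1000),
        "ymin": int(height * bbox[1] / 1000),
        "xmax": int(width * bbox[2] / 1000),
        "ymax": int(height * bbox[3] / 1000),
    }

assert unnormalize_box((0, 0, 1000, 500), width=800, height=600) == {
    "xmin": 0, "ymin": 0, "xmax": 800, "ymax": 300,
}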
| 657 | 1 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPTaLMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _a ( self , a_ , a_ ) -> torch.Tensor:
return torch.zeros(a_ , self.prefix_length , dtype=torch.intaa , device=a_ )
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
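# --- illustrative sketch (not part of the model class above) ---
# The beam search above ranks hypotheses by average log-probability
# (scores_sum / seq_lengths), so shorter beams are not unfairly favoured.
# The scoring step in isolation:
import torch

log_prob_sums = torch.tensor([-4.0, -5.0])  # summed log-probs per beam
lengths = torch.tensor([2.0, 4.0])          # tokens generated per beam
normalized = log_prob_sums / lengths        # tensor([-2.00, -1.25])
best_beam = int(normalized.argmax())        # beam 1 wins after normalization
assert best_beam == 1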
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def merge(UpperCamelCase__ , UpperCamelCase__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(UpperCamelCase__ ) <= 1:
return collection
_UpperCAmelCase = len(UpperCamelCase__ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
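# --- illustrative sketch (standalone; mirrors the merge step above) ---
# The generator above pops from the front of each sorted half, which costs
# O(n) per pop. An index-based merge of two sorted lists avoids that while
# keeping the sort stable:
def merge_sorted(left, right):
    out, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            out.append(left[i]); i += 1
        else:
            out.append(right[j]); j += 1
    return out + left[i:] + right[j:]

assert merge_sorted([1, 4], [1, 5]) == [1, 1, 4, 5]  # stable: equal keys keep order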
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Tuple = '''time_series_transformer'''
lowercase_ : Any = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , a_ = None , a_ = None , a_ = "student_t" , a_ = "nll" , a_ = 1 , a_ = [1, 2, 3, 4, 5, 6, 7] , a_ = "mean" , a_ = 0 , a_ = 0 , a_ = 0 , a_ = 0 , a_ = None , a_ = None , a_ = 32 , a_ = 32 , a_ = 2 , a_ = 2 , a_ = 2 , a_ = 2 , a_ = True , a_ = "gelu" , a_ = 64 , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 100 , a_ = 0.02 , a_=True , **a_ , ) -> List[Any]:
# time series specific configuration
_UpperCAmelCase = prediction_length
_UpperCAmelCase = context_length or prediction_length
_UpperCAmelCase = distribution_output
_UpperCAmelCase = loss
_UpperCAmelCase = input_size
_UpperCAmelCase = num_time_features
_UpperCAmelCase = lags_sequence
_UpperCAmelCase = scaling
_UpperCAmelCase = num_dynamic_real_features
_UpperCAmelCase = num_static_real_features
_UpperCAmelCase = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase = cardinality
else:
_UpperCAmelCase = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase = embedding_dimension
else:
_UpperCAmelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_UpperCAmelCase = num_parallel_samples
# Transformer architecture configuration
_UpperCAmelCase = input_size * len(a_ ) + self._number_of_features
_UpperCAmelCase = d_model
_UpperCAmelCase = encoder_attention_heads
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = encoder_ffn_dim
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = encoder_layerdrop
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = activation_function
_UpperCAmelCase = init_std
_UpperCAmelCase = use_cache
super().__init__(is_encoder_decoder=a_ , **a_ )
@property
def _a ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
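# --- illustrative arithmetic (not part of the config class above) ---
# feature_size is input_size * len(lags_sequence) plus _number_of_features.
# Reproducing that computation for the defaults used in __init__ above:
input_size = 1
lags_sequence = [1, 2, 3, 4, 5, 6, 7]
number_of_features = (
    sum([])           # no static categorical embeddings by default
    + 0               # num_dynamic_real_features
    + 0               # num_time_features
    + 0               # num_static_real_features
    + input_size * 2  # the log1p(abs(loc)) and log(scale) features
)
assert input_size * len(lags_sequence) + number_of_features == 9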
| 657 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
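# --- illustrative sketch (assumption: a simplified stand-in for ids_tensor) ---
# The testers above fabricate random token ids of a given shape. A minimal
# equivalent with a fixed seed for reproducibility:
import torch

def random_ids(shape, vocab_size, seed=0):
    generator = torch.Generator().manual_seed(seed)
    return torch.randint(0, vocab_size, shape, generator=generator, dtype=torch.long)

ids = random_ids((13, 7), vocab_size=99)  # batch_size x seq_length, as above
assert ids.shape == (13, 7) and int(ids.max()) < 99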
| 657 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : List[str] = '''fnet'''
def __init__( self , a_=32000 , a_=768 , a_=12 , a_=3072 , a_="gelu_new" , a_=0.1 , a_=512 , a_=4 , a_=0.02 , a_=1e-12 , a_=False , a_=512 , a_=3 , a_=1 , a_=2 , **a_ , ) -> Optional[Any]:
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = use_tpu_fourier_optimizations
_UpperCAmelCase = tpu_short_seq_length
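# --- illustrative sketch (not part of the config class above) ---
# Configs of this shape serialize to and from plain JSON, which is how the
# `config.json` files referenced in the archive map above are produced:
import json

fnet_like = {"model_type": "fnet", "vocab_size": 32000, "hidden_size": 768}
blob = json.dumps(fnet_like, indent=2)
assert json.loads(blob)["hidden_size"] == 768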
| 657 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "schedule.bin" )
torch.save(scheduler.state_dict() , UpperCamelCase__ )
_UpperCAmelCase = torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Optional[int]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> str:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase_ : Dict = 10
def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(a_ , self.num_steps )
self.assertListAlmostEqual(
a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps )
self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" )
class _lowerCAmelCase :
def __init__( self , a_ ) -> Union[str, Any]:
_UpperCAmelCase = fn
def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
return self.fn(*a_ , **a_ )
@classmethod
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
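# --- illustrative sketch (not part of the tests above) ---
# The warmup schedules under test share a two-phase shape: a linear ramp over
# num_warmup_steps, then decay. The linear-with-warmup multiplier by hand,
# checked against the expected learning rates listed in the tests:
def linear_with_warmup(step, num_warmup_steps, num_training_steps):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

lrs = [10.0 * linear_with_warmup(s, 2, 10) for s in range(10)]
assert lrs == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]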
| 657 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__magic_name__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
_UpperCAmelCase = val
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_UpperCAmelCase = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_UpperCAmelCase = value
else:
_UpperCAmelCase = value
return new_state_dict
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
_UpperCAmelCase = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[:256, :]
_UpperCAmelCase = in_proj_bias[:256]
_UpperCAmelCase = in_proj_weight[256:512, :]
_UpperCAmelCase = in_proj_bias[256:512]
_UpperCAmelCase = in_proj_weight[-256:, :]
_UpperCAmelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_UpperCAmelCase = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
_UpperCAmelCase = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[:256, :]
_UpperCAmelCase = in_proj_bias[:256]
_UpperCAmelCase = in_proj_weight[256:512, :]
_UpperCAmelCase = in_proj_bias[256:512]
_UpperCAmelCase = in_proj_weight[-256:, :]
_UpperCAmelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_UpperCAmelCase = state_dict.pop(
f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
_UpperCAmelCase = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_UpperCAmelCase = in_proj_weight_cross_attn[:256, :]
_UpperCAmelCase = in_proj_bias_cross_attn[:256]
_UpperCAmelCase = in_proj_weight_cross_attn[256:512, :]
_UpperCAmelCase = in_proj_bias_cross_attn[256:512]
_UpperCAmelCase = in_proj_weight_cross_attn[-256:, :]
_UpperCAmelCase = in_proj_bias_cross_attn[-256:]
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = image.size
_UpperCAmelCase = max(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = 800 if "detection" in checkpoint_url else 1000
_UpperCAmelCase = target_max_size / current_max_size
_UpperCAmelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = F.to_tensor(UpperCamelCase__ )
_UpperCAmelCase = F.normalize(UpperCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
logger.info("Converting model..." )
# load original state dict
_UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = rename_backbone_keys(UpperCamelCase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_UpperCAmelCase = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
_UpperCAmelCase = val
# create HuggingFace model and load state dict
_UpperCAmelCase = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
_UpperCAmelCase = 15
_UpperCAmelCase = 2
_UpperCAmelCase = {0: "table", 1: "table rotated"}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
else:
_UpperCAmelCase = 125
_UpperCAmelCase = 6
_UpperCAmelCase = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
_UpperCAmelCase = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1000 )
_UpperCAmelCase = TableTransformerForObjectDetection(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# verify our conversion
_UpperCAmelCase = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
_UpperCAmelCase = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=UpperCamelCase__ )
_UpperCAmelCase = Image.open(UpperCamelCase__ ).convert("RGB" )
_UpperCAmelCase = normalize(resize(UpperCamelCase__ , UpperCamelCase__ ) ).unsqueeze(0 )
_UpperCAmelCase = model(UpperCamelCase__ )
if "detection" in checkpoint_url:
_UpperCAmelCase = (1, 15, 3)
_UpperCAmelCase = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
_UpperCAmelCase = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
_UpperCAmelCase = (1, 125, 7)
_UpperCAmelCase = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
_UpperCAmelCase = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
image_processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
_UpperCAmelCase = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(UpperCamelCase__ )
image_processor.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__magic_name__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
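# --- illustrative sketch (not part of the conversion script above) ---
# read_in_q_k_v above slices each fused in_proj matrix into query, key and
# value blocks of 256 rows. The same slicing on a dummy tensor:
import torch

hidden = 256
in_proj_weight = torch.randn(3 * hidden, hidden)
q, k, v = (
    in_proj_weight[:hidden, :],
    in_proj_weight[hidden : 2 * hidden, :],
    in_proj_weight[-hidden:, :],
)
assert q.shape == k.shape == v.shape == (hidden, hidden)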
| 657 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase ( UpperCamelCase__=None ):
"""simple docstring"""
if subparsers is not None:
_UpperCAmelCase = subparsers.add_parser("test" )
else:
_UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=UpperCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
_UpperCAmelCase = script_name
else:
_UpperCAmelCase = f"--config_file={args.config_file} {script_name}"
_UpperCAmelCase = ["accelerate-launch"] + test_args.split()
_UpperCAmelCase = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = test_command_parser()
_UpperCAmelCase = parser.parse_args()
test_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
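# --- illustrative sketch (assumption: a simplified stand-in, not the accelerate CLI) ---
# test_command_parser above works both standalone and as a subcommand of a
# larger CLI. The same dual-mode pattern in miniature:
import argparse

def build_parser(subparsers=None):
    parser = subparsers.add_parser("test") if subparsers is not None else argparse.ArgumentParser("test")
    parser.add_argument("--config_file", default=None)
    return parser

args = build_parser().parse_args(["--config_file", "cfg.yaml"])
assert args.config_file == "cfg.yaml"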
| 657 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
_UpperCAmelCase = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
sd_pipe.set_scheduler("sample_euler" )
_UpperCAmelCase = "A painting of a squirrel eating a burger"
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe([prompt] , generator=a_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self ) -> Any:
_UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
_UpperCAmelCase = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
sd_pipe.set_scheduler("sample_euler" )
_UpperCAmelCase = "A painting of a squirrel eating a burger"
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe([prompt] , generator=a_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
_UpperCAmelCase = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
_UpperCAmelCase = "A painting of a squirrel eating a burger"
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe(
[prompt] , generator=a_ , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=a_ , )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array(
[0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
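# --- illustrative sketch (not part of the tests above) ---
# Each test compares a 3x3 corner slice of the generated image to stored
# reference values under a max-absolute-difference tolerance:
import numpy as np

image = np.zeros((1, 512, 512, 3), dtype=np.float32)   # stand-in for output.images
image_slice = image[0, -3:, -3:, -1]                   # bottom-right corner, last channel
expected_slice = np.zeros(9, dtype=np.float32)         # reference values would go here
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2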
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 10 - x * x
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) >= 0:
raise ValueError("Wrong space!" )
_UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
_UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(UpperCamelCase__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) < 0:
_UpperCAmelCase = c
else:
_UpperCAmelCase = c
return c
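# --- illustrative sketch (standalone; mirrors the bisection above) ---
# The loop halves [a, b] until the bracket is narrower than 0.01; with
# f(x) = 10 - x*x the midpoint converges to sqrt(10) ~ 3.1623:
def bisect(f, a, b, tol=0.01):
    if f(a) * f(b) >= 0:
        raise ValueError("f(a) and f(b) must have opposite signs")
    while (b - a) >= tol:
        c = (a + b) / 2
        if f(c) == 0.0:
            return c
        if f(a) * f(c) < 0:
            b = c
        else:
            a = c
    return (a + b) / 2

assert abs(bisect(lambda x: 10 - x * x, 0, 6) - 10 ** 0.5) < 0.01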
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 657 | 1 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = AlbertConfig.from_json_file(UpperCamelCase__ )
print(f"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = AlbertForPreTraining(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
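# --- illustrative sketch (not part of the conversion script above) ---
# torch.save above writes only the state_dict, not the full module; loading
# it back therefore requires re-instantiating the architecture first.
# Round-tripping through an in-memory buffer:
import io

import torch
from torch import nn

model = nn.Linear(4, 2)
buffer = io.BytesIO()
torch.save(model.state_dict(), buffer)
buffer.seek(0)
restored = nn.Linear(4, 2)
restored.load_state_dict(torch.load(buffer))
assert torch.equal(restored.weight, model.weight)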
| 657 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPTaLMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _a ( self , a_ , a_ ) -> torch.Tensor:
return torch.zeros(a_ , self.prefix_length , dtype=torch.intaa , device=a_ )
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''PoolFormerFeatureExtractor''']
__magic_name__ = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
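# --- illustrative sketch (assumption: simplified, not the real _LazyModule) ---
# The try/except blocks above register symbols only when their backend can be
# imported; the same guard in miniature:
_demo_import_structure = {"configuration": ["Config"]}
try:
    import torch  # noqa: F401
except ImportError:
    pass
else:
    _demo_import_structure["modeling"] = ["Model"]
# With torch installed, "modeling" is exposed; without it, only "configuration".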
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
"""simple docstring"""
class _lowerCAmelCase : # Public class to implement a graph
def __init__( self , a_ , a_ , a_ ) -> None:
_UpperCAmelCase = row
_UpperCAmelCase = col
_UpperCAmelCase = graph
def _a ( self , a_ , a_ , a_ ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def _a ( self , a_ , a_ , a_ ) -> None:
# Checking all 8 elements surrounding nth element
_UpperCAmelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
_UpperCAmelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
_UpperCAmelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , a_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , a_ )
def _a ( self ) -> int: # And finally, count all islands.
_UpperCAmelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
_UpperCAmelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if not visited[i][j] and self.graph[i][j] == 1:
self.diffs(a_ , a_ , a_ )
count += 1
return count
| 657 |
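A self-contained restatement of the 8-neighbour DFS island count implemented by the class above, with readable names (the original obfuscates its attributes, so this sketch is illustrative rather than a drop-in):

def count_islands(grid):
    rows, cols = len(grid), len(grid[0])
    visited = [[False] * cols for _ in range(rows)]

    def dfs(i, j):
        # Recurse only into in-bounds, unvisited land cells.
        if 0 <= i < rows and 0 <= j < cols and not visited[i][j] and grid[i][j]:
            visited[i][j] = True
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    if di or dj:
                        dfs(i + di, j + dj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not visited[i][j]:
                dfs(i, j)
                count += 1
    return count

print(count_islands([[1, 1, 0], [0, 0, 0], [0, 0, 1]]))  # 2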
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 657 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Optional[int] = ['''speech''']
def __init__( self , *a_ , **a_ ) -> Any:
requires_backends(self , ["speech"] )
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : List[str] = ['''speech''']
def __init__( self , *a_ , **a_ ) -> Union[str, Any]:
requires_backends(self , ["speech"] )
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if (len(UpperCamelCase__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
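The pair of functions above mirrors the standard library's RFC 3548 base16 codec; a quick equivalence check (this uses base64.b16encode/b16decode from the stdlib, not the functions above):

import base64

data = b"Hello"
encoded = base64.b16encode(data).decode("ascii")
print(encoded)                    # 48656C6C6F
print(base64.b16decode(encoded))  # b'Hello'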
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
_UpperCAmelCase = precision
_UpperCAmelCase = ceil(precision / 14 )
_UpperCAmelCase = 42_6880 * Decimal(1_0005 ).sqrt()
_UpperCAmelCase = 1
_UpperCAmelCase = 1359_1409
_UpperCAmelCase = Decimal(UpperCamelCase__ )
for k in range(1 , UpperCamelCase__ ):
_UpperCAmelCase = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
linear_term += 5_4514_0134
exponential_term *= -26_2537_4126_4076_8000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__magic_name__ = 50
print(f'''The first {n} digits of pi is: {pi(n)}''')
| 657 |
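Each term of the Chudnovsky series contributes roughly 14 decimal digits, which is why the function above iterates ceil(precision / 14) times; a quick check of that term count (illustrative, separate from the original file):

from math import ceil

for precision in (50, 100, 1000):
    # terms needed at ~14 digits per term
    print(precision, ceil(precision / 14))  # 50 -> 4, 100 -> 8, 1000 -> 72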
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
try:
_UpperCAmelCase = float(UpperCamelCase__ )
except ValueError:
raise ValueError("Please enter a valid number" )
_UpperCAmelCase = decimal - int(UpperCamelCase__ )
if fractional_part == 0:
return int(UpperCamelCase__ ), 1
else:
_UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
_UpperCAmelCase , _UpperCAmelCase = numerator // divisor, denominator // divisor
return int(UpperCamelCase__ ), int(UpperCamelCase__ )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 1 |
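The while loop above is Euclid's algorithm for the GCD, used to reduce the fraction; the standard library reaches the same reduced form directly. An equivalence sketch (uses fractions.Fraction, not the function above):

from fractions import Fraction

print(Fraction("6.25"))  # 25/4
print(Fraction("1.5"))   # 3/2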
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 | 1 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase ):
lowercase_ : List[Any] = 1
@register_to_config
def __init__( self , a_ = 1000 , a_ = None ) -> Any:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(a_ )
# standard deviation of the initial noise distribution
_UpperCAmelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
_UpperCAmelCase = 4
# running values
_UpperCAmelCase = []
def _a ( self , a_ , a_ = None ) -> Any:
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
_UpperCAmelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
_UpperCAmelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
_UpperCAmelCase = torch.sin(steps * math.pi / 2 ) ** 2
_UpperCAmelCase = (1.0 - self.betas**2) ** 0.5
_UpperCAmelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
_UpperCAmelCase = timesteps.to(a_ )
_UpperCAmelCase = []
def _a ( self , a_ , a_ , a_ , a_ = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
_UpperCAmelCase = (self.timesteps == timestep).nonzero().item()
_UpperCAmelCase = timestep_index + 1
_UpperCAmelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(a_ )
if len(self.ets ) == 1:
_UpperCAmelCase = self.ets[-1]
elif len(self.ets ) == 2:
_UpperCAmelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
_UpperCAmelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
_UpperCAmelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
_UpperCAmelCase = self._get_prev_sample(a_ , a_ , a_ , a_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a_ )
def _a ( self , a_ , *a_ , **a_ ) -> torch.FloatTensor:
return sample
def _a ( self , a_ , a_ , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = self.alphas[timestep_index]
_UpperCAmelCase = self.betas[timestep_index]
_UpperCAmelCase = self.alphas[prev_timestep_index]
_UpperCAmelCase = self.betas[prev_timestep_index]
_UpperCAmelCase = (sample - sigma * ets) / max(a_ , 1e-8 )
_UpperCAmelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> Dict:
return self.config.num_train_timesteps
| 657 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
| 657 | 1 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__magic_name__ = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
__magic_name__ = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
__magic_name__ = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
__magic_name__ = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
__magic_name__ = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
for tf_name, hf_name in patterns:
_UpperCAmelCase = k.replace(UpperCamelCase__ , UpperCamelCase__ )
return k
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = BigBirdPegasusConfig(**UpperCamelCase__ )
_UpperCAmelCase = BigBirdPegasusForConditionalGeneration(UpperCamelCase__ )
_UpperCAmelCase = torch_model.state_dict()
_UpperCAmelCase = {}
# separating decoder weights
_UpperCAmelCase = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
_UpperCAmelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ):
_UpperCAmelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(UpperCamelCase__ ):
continue
_UpperCAmelCase = DECODER_PATTERNS
_UpperCAmelCase = rename_state_dict_key(UpperCamelCase__ , UpperCamelCase__ )
if new_k not in state_dict:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if any(i in k for i in ["dense", "query", "key", "value"] ):
_UpperCAmelCase = v.T
_UpperCAmelCase = torch.from_numpy(UpperCamelCase__ )
assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ):
_UpperCAmelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(UpperCamelCase__ ):
continue
_UpperCAmelCase = REMAINING_PATTERNS
_UpperCAmelCase = rename_state_dict_key(UpperCamelCase__ , UpperCamelCase__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if any(i in k for i in ["dense", "query", "key", "value"] ):
_UpperCAmelCase = v.T
_UpperCAmelCase = torch.from_numpy(UpperCamelCase__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
_UpperCAmelCase = mapping["model.embed_positions.weight"]
_UpperCAmelCase = mapping.pop("model.embed_positions.weight" )
_UpperCAmelCase , _UpperCAmelCase = torch_model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
_UpperCAmelCase = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = tf.train.list_variables(UpperCamelCase__ )
_UpperCAmelCase = {}
_UpperCAmelCase = ["global_step"]
for name, shape in tqdm(UpperCamelCase__ , desc="converting tf checkpoint to dict" ):
_UpperCAmelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCAmelCase = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = array
return tf_weights
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = get_tf_weights_as_numpy(UpperCamelCase__ )
_UpperCAmelCase = convert_bigbird_pegasus(UpperCamelCase__ , UpperCamelCase__ )
torch_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__magic_name__ = parser.parse_args()
__magic_name__ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 657 |
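The core of the conversion above is sequential pattern replacement on TensorFlow variable names; a minimal standalone illustration of that renaming step (the patterns list here is a small subset of the mappings defined above):

def rename_state_dict_key(k, patterns):
    # Apply each (tf_name, hf_name) substitution in order.
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k

patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
print(rename_state_dict_key("encoder/layer_0/kernel", patterns))
# encoder.layers.0.weight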
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 657 | 1 |
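A hedged usage sketch of the config above, assuming a transformers version that ships UperNet; with backbone_config=None the default ResNet backbone is used and the model weights are randomly initialized:

from transformers import UperNetConfig, UperNetForSemanticSegmentation

config = UperNetConfig()  # falls back to the default ResNet backbone
model = UperNetForSemanticSegmentation(config)
print(config.backbone_config.model_type)  # resnet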
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_UpperCAmelCase = k.replace(UpperCamelCase__ , UpperCamelCase__ )
if k.startswith("encoder" ):
_UpperCAmelCase = k.replace(".attn" , ".self_attn" )
_UpperCAmelCase = k.replace("norm1" , "self_attn_layer_norm" )
_UpperCAmelCase = k.replace("norm2" , "final_layer_norm" )
elif k.startswith("decoder" ):
_UpperCAmelCase = k.replace("norm1" , "self_attn_layer_norm" )
_UpperCAmelCase = k.replace("norm2" , "encoder_attn_layer_norm" )
_UpperCAmelCase = k.replace("norm3" , "final_layer_norm" )
return k
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
_UpperCAmelCase = sd.pop(UpperCamelCase__ )
_UpperCAmelCase = k.replace("layernorm_embedding" , "layer_norm" )
assert new_k not in sd
_UpperCAmelCase = v
__magic_name__ = ['''START''']
@torch.no_grad()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = torch.load(UpperCamelCase__ , map_location="cpu" )
_UpperCAmelCase = model["model"]
_UpperCAmelCase = BlenderbotConfig.from_json_file(UpperCamelCase__ )
_UpperCAmelCase = BlenderbotForConditionalGeneration(UpperCamelCase__ )
_UpperCAmelCase = m.model.state_dict().keys()
_UpperCAmelCase = []
_UpperCAmelCase = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_UpperCAmelCase = rename_state_dict_key(UpperCamelCase__ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_UpperCAmelCase = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCamelCase__ )
m.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
m.half()
m.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
__magic_name__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 | 1 |
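Several files in this set end with the same _LazyModule pattern; the idea is to defer heavy imports until an attribute is first accessed. A minimal sketch of that mechanism (illustrative only, not the actual _LazyModule implementation):

import importlib

class LazyLoader:
    def __init__(self, module_name):
        self._name = module_name
        self._module = None

    def __getattr__(self, attr):
        # Import the real module only on first attribute access.
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

json = LazyLoader("json")
print(json.dumps({"ok": True}))  # {"ok": true}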
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
_UpperCAmelCase = {
"input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.intaa ), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
_UpperCAmelCase = model(a_ )["last_hidden_state"]
_UpperCAmelCase = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , a_ )
# compare the actual values for a slice.
_UpperCAmelCase = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 657 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
| 657 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__magic_name__ = ['''small''', '''medium''', '''large''']
__magic_name__ = '''lm_head.decoder.weight'''
__magic_name__ = '''lm_head.weight'''
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = torch.load(UpperCamelCase__ )
_UpperCAmelCase = d.pop(UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
__magic_name__ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__magic_name__ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
__magic_name__ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 657 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
# If we use the most probable targets, and filter differently, we should still
# have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda a_ : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
| 657 | 1 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Any:
_UpperCAmelCase = [10, 20, 30, 40, 50, 60]
_UpperCAmelCase = [2, 4, 6, 8, 10, 12]
_UpperCAmelCase = 100
self.assertEqual(kp.calc_profit(a_ , a_ , a_ ) , 210 )
def _a ( self ) -> Union[str, Any]:
self.assertRaisesRegex(a_ , "max_weight must greater than zero." )
def _a ( self ) -> int:
self.assertRaisesRegex(a_ , "Weight can not be negative." )
def _a ( self ) -> Union[str, Any]:
self.assertRaisesRegex(a_ , "Profit can not be negative." )
def _a ( self ) -> Union[str, Any]:
self.assertRaisesRegex(a_ , "max_weight must greater than zero." )
def _a ( self ) -> Tuple:
self.assertRaisesRegex(
a_ , "The length of profit and weight must be same." )
if __name__ == "__main__":
unittest.main()
| 657 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , pa.Buffer ) else pa.memory_map(UpperCamelCase__ )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if pa.types.is_list(UpperCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if isinstance(lst[0] , UpperCamelCase__ ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase__ )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 657 | 1 |
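A minimal end-to-end use of the pieces exercised above; the imports are the same ones the test module uses:
import pyarrow as pa
from datasets.arrow_writer import ArrowWriter, TypedSequence
from datasets.features import Value

# try_type is a soft preference: when the cast fails, the inferred type wins
arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
assert arr.type == pa.string()

# ArrowWriter streams examples into an Arrow IPC stream
stream = pa.BufferOutputStream()
with ArrowWriter(stream=stream) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()
assert num_examples == 2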
"""simple docstring"""
def solution(pence: int = 200) -> int:
"""simple docstring"""
coins = [1, 2, 5, 10, 20, 50, 100, 200]
number_of_ways = [0] * (pence + 1)
number_of_ways[0] = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(coin , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 657 |
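A quick worked case of the DP above: for pence=5 only the 1p, 2p and 5p coins contribute, and the table ends at 4 ways (5, 2+2+1, 2+1+1+1 and 1+1+1+1+1).
assert solution(5) == 4  # matches enumerating the four combinations by hand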
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _a ( self ) -> Optional[int]:
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _a ( self ) -> int:
_UpperCAmelCase = BackboneMixin()
_UpperCAmelCase = ["a", "b", "c"]
_UpperCAmelCase = ["a", "c"]
_UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 657 | 1 |
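A minimal sketch of the alignment rule those tests encode (a reconstruction for illustration, not the library source): whichever of the two arguments is given determines the other, and when both are missing the last stage is used.
def align_out_features_out_indices(out_features, out_indices, stage_names):
    # default to the last stage when neither argument is specified
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    # derive indices from the requested feature names
    if out_indices is None:
        return out_features, [stage_names.index(name) for name in out_features]
    # derive feature names from the (possibly negative) indices
    if out_features is None:
        return [stage_names[i] for i in out_indices], list(out_indices)
    return out_features, list(out_indices)

assert align_out_features_out_indices(None, [-3, -1], ["a", "b", "c"]) == (["a", "c"], [-3, -1])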
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
__magic_name__ = None
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__magic_name__ = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
__magic_name__ = {
'''facebook/nllb-large-en-ro''': 10_24,
'''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
__magic_name__ = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Dict = VOCAB_FILES_NAMES
lowercase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Any = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : List[Any] = ['''input_ids''', '''attention_mask''']
lowercase_ : str = NllbTokenizer
lowercase_ : List[int] = []
lowercase_ : List[int] = []
def __init__( self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , a_=False , **a_ , ) -> List[Any]:
# Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCAmelCase = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
_UpperCAmelCase = legacy_behaviour
super().__init__(
vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , legacy_behaviour=a_ , **a_ , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
_UpperCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
_UpperCAmelCase = {
lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_UpperCAmelCase = src_lang if src_lang is not None else "eng_Latn"
_UpperCAmelCase = self.convert_tokens_to_ids(self._src_lang )
_UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , a_ ) -> None:
_UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , a_ , a_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self , a_ , a_ = None ) -> List[int]:
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self , a_ , a_ , a_ , a_ , **a_ ) -> Tuple:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
_UpperCAmelCase = src_lang
_UpperCAmelCase = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
_UpperCAmelCase = self.convert_tokens_to_ids(a_ )
_UpperCAmelCase = tgt_lang_id
return inputs
def _a ( self , a_ , a_ = "eng_Latn" , a_ = None , a_ = "fra_Latn" , **a_ , ) -> BatchEncoding:
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def _a ( self ) -> int:
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Optional[int]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , a_ ) -> None:
_UpperCAmelCase = self.convert_tokens_to_ids(a_ )
if self.legacy_behaviour:
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCAmelCase = [self.cur_lang_code]
_UpperCAmelCase = [self.eos_token_id]
_UpperCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _a ( self , a_ ) -> None:
_UpperCAmelCase = self.convert_tokens_to_ids(a_ )
if self.legacy_behaviour:
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCAmelCase = [self.cur_lang_code]
_UpperCAmelCase = [self.eos_token_id]
_UpperCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _a ( self , a_ , a_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(a_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory." )
return
_UpperCAmelCase = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
| 657 |
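Typical translation-style use of the fast tokenizer defined above; the checkpoint name is the one from the pretrained map, and text_target assumes a reasonably recent transformers release:
from transformers import NllbTokenizerFast

tok = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
# non-legacy behaviour wraps the source as [src_lang_code] ... [eos],
# per the source-language special-token logic above
inputs = tok("The cat sat on the mat.", return_tensors="pt")
labels = tok(text_target="Le chat est assis sur le tapis.", return_tensors="pt")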
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
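The _LazyModule indirection above defers the heavy framework imports until an attribute is first touched. The same effect can be sketched with a module-level __getattr__ (PEP 562); this is a simplified stand-in, not the transformers implementation:
import importlib

_import_structure = {
    "configuration_electra": ["ElectraConfig"],
    "modeling_electra": ["ElectraModel"],
}
# invert the mapping: attribute name -> submodule that defines it
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):  # called only when normal attribute lookup fails
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")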
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# Input without a mask token is not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
# If we use the most probable targets, and filter differently, we should still
# have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda x : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
# The target list contains duplicates, so the pipeline cannot return
# more candidates than the number of unique targets
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
| 657 |
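The targets/top_k interaction those tests exercise, shown through the public API (the checkpoint name is only an example):
import transformers

unmasker = transformers.pipeline("fill-mask", model="distilroberta-base")
# scoring is restricted to the given candidate strings; top_k then keeps
# the best-scoring subset of those candidates
outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=2)
print([o["token_str"] for o in outputs])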
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
def _a ( self ) -> Any:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
_UpperCAmelCase = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 1 |
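The slow/fast parity pattern the test above follows, in minimal form (same checkpoint as in the test):
from transformers import BarthezTokenizer, BarthezTokenizerFast

slow = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
fast = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
text = "I was born in 92000, and this is falsé."
assert slow.tokenize(text) == fast.tokenize(text)
assert slow.encode(text) == fast.encode(text)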
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = OmegaConf.load(UpperCamelCase__ )
_UpperCAmelCase = torch.load(UpperCamelCase__ , map_location="cpu" )["model"]
_UpperCAmelCase = list(state_dict.keys() )
# extract state_dict for VQVAE
_UpperCAmelCase = {}
_UpperCAmelCase = "first_stage_model."
for key in keys:
if key.startswith(UpperCamelCase__ ):
_UpperCAmelCase = state_dict[key]
# extract state_dict for UNetLDM
_UpperCAmelCase = {}
_UpperCAmelCase = "model.diffusion_model."
for key in keys:
if key.startswith(UpperCamelCase__ ):
_UpperCAmelCase = state_dict[key]
_UpperCAmelCase = config.model.params.first_stage_config.params
_UpperCAmelCase = config.model.params.unet_config.params
_UpperCAmelCase = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_UpperCAmelCase = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
__magic_name__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 657 |
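Once converted, the output directory loads like any diffusers pipeline. A sketch of unconditional sampling from it (the path is the hypothetical --output_path passed above):
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("./ldm-converted")  # hypothetical output path
image = pipe(num_inference_steps=50).images[0]
image.save("sample.png")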
"""simple docstring"""
def is_automorphic_number(number: int) -> bool:
"""simple docstring"""
if not isinstance(number , int ):
msg = f"Input value of [number={number}] must be an integer"
raise TypeError(msg )
if number < 0:
return False
number_square = number * number
# compare the trailing digits of the number and of its square
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
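Quick sanity checks for the predicate above: a number qualifies when its square ends in the number itself (an automorphic number), e.g. 5 -> 25, 76 -> 5776.
assert is_automorphic_number(5)
assert is_automorphic_number(76)
assert not is_automorphic_number(7)  # 49 does not end in 7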
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : int = ['''input_features''', '''is_longer''']
def __init__( self , a_=64 , a_=48000 , a_=480 , a_=10 , a_=1024 , a_=0.0 , a_=False , a_ = 0 , a_ = 14000 , a_ = None , a_ = "fusion" , a_ = "repeatpad" , **a_ , ) -> Optional[Any]:
super().__init__(
feature_size=a_ , sampling_rate=a_ , padding_value=a_ , return_attention_mask=a_ , **a_ , )
_UpperCAmelCase = top_db
_UpperCAmelCase = truncation
_UpperCAmelCase = padding
_UpperCAmelCase = fft_window_size
_UpperCAmelCase = (fft_window_size >> 1) + 1
_UpperCAmelCase = hop_length
_UpperCAmelCase = max_length_s
_UpperCAmelCase = max_length_s * sampling_rate
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = frequency_min
_UpperCAmelCase = frequency_max
_UpperCAmelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=a_ , min_frequency=a_ , max_frequency=a_ , sampling_rate=a_ , norm=a_ , mel_scale="htk" , )
_UpperCAmelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=a_ , min_frequency=a_ , max_frequency=a_ , sampling_rate=a_ , norm="slaney" , mel_scale="slaney" , )
def _a ( self ) -> Dict[str, Any]:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self , a_ , a_ = None ) -> np.ndarray:
_UpperCAmelCase = spectrogram(
a_ , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=a_ , log_mel="dB" , )
return log_mel_spectrogram.T
def _a ( self , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase = [0]
# randomly choose index for each part
_UpperCAmelCase = np.random.choice(ranges[0] )
_UpperCAmelCase = np.random.choice(ranges[1] )
_UpperCAmelCase = np.random.choice(ranges[2] )
_UpperCAmelCase = mel[idx_front : idx_front + chunk_frames, :]
_UpperCAmelCase = mel[idx_middle : idx_middle + chunk_frames, :]
_UpperCAmelCase = mel[idx_back : idx_back + chunk_frames, :]
_UpperCAmelCase = torch.tensor(mel[None, None, :] )
_UpperCAmelCase = torch.nn.functional.interpolate(
a_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=a_ )
_UpperCAmelCase = mel_shrink[0][0].numpy()
_UpperCAmelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _a ( self , a_ , a_ , a_ , a_ ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_UpperCAmelCase = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_UpperCAmelCase = len(a_ ) - max_length
_UpperCAmelCase = np.random.randint(0 , overflow + 1 )
_UpperCAmelCase = waveform[idx : idx + max_length]
_UpperCAmelCase = self._np_extract_fbank_features(a_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_UpperCAmelCase = self._np_extract_fbank_features(a_ , self.mel_filters )
_UpperCAmelCase = max_length // self.hop_length + 1 # the +1 is related to how the spectrogram is computed
_UpperCAmelCase = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_UpperCAmelCase = np.stack([mel, mel, mel, mel] , axis=0 )
_UpperCAmelCase = False
else:
_UpperCAmelCase = self._random_mel_fusion(a_ , a_ , a_ )
_UpperCAmelCase = True
else:
raise NotImplementedError(f"data_truncating {truncation} not implemented" )
else:
_UpperCAmelCase = False
# only use repeat as a new possible value for padding; the audio is repeated before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_UpperCAmelCase = int(max_length / len(a_ ) )
_UpperCAmelCase = np.stack(np.tile(a_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_UpperCAmelCase = int(max_length / len(a_ ) )
_UpperCAmelCase = np.stack(np.tile(a_ , a_ ) )
_UpperCAmelCase = np.pad(a_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
_UpperCAmelCase = self._np_extract_fbank_features(a_ , self.mel_filters )
_UpperCAmelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_UpperCAmelCase = self._np_extract_fbank_features(a_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , **a_ , ) -> BatchFeature:
_UpperCAmelCase = truncation if truncation is not None else self.truncation
_UpperCAmelCase = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_UpperCAmelCase = isinstance(a_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
_UpperCAmelCase = is_batched_numpy or (
isinstance(a_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase = [np.asarray(a_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(a_ , np.ndarray ):
_UpperCAmelCase = np.asarray(a_ , dtype=np.floataa )
elif isinstance(a_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase = [np.asarray(a_ )]
# convert to mel spectrogram, truncate and pad if needed.
_UpperCAmelCase = [
self._get_input_mel(a_ , max_length if max_length else self.nb_max_samples , a_ , a_ )
for waveform in raw_speech
]
_UpperCAmelCase = []
_UpperCAmelCase = []
for mel, longer in padded_inputs:
input_mel.append(a_ )
is_longer.append(a_ )
if truncation == "fusion" and sum(a_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_UpperCAmelCase = np.random.randint(0 , len(a_ ) )
_UpperCAmelCase = True
if isinstance(input_mel[0] , a_ ):
_UpperCAmelCase = [np.asarray(a_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_UpperCAmelCase = [[longer] for longer in is_longer]
_UpperCAmelCase = {"input_features": input_mel, "is_longer": is_longer}
_UpperCAmelCase = BatchFeature(a_ )
if return_tensors is not None:
_UpperCAmelCase = input_features.convert_to_tensors(a_ )
return input_features
| 657 |
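The `repeat` / `repeatpad` branches in the feature-extractor sample above are easier to follow with descriptive names. Below is a minimal numpy sketch of the same padding logic; the function name and signature are illustrative, not part of the extractor's API:

import numpy as np

def pad_waveform(waveform: np.ndarray, max_length: int, padding: str = "repeatpad") -> np.ndarray:
    # hypothetical helper: mirrors the padding branches of the sample above
    if waveform.shape[0] >= max_length:
        return waveform[:max_length]
    if padding == "repeat":
        # tile one copy past the target length, then truncate
        n_repeat = int(max_length / len(waveform))
        return np.tile(waveform, n_repeat + 1)[:max_length]
    if padding == "repeatpad":
        # tile as many whole copies as fit; the zero padding below fills the rest
        n_repeat = int(max_length / len(waveform))
        waveform = np.tile(waveform, n_repeat)
    return np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

print(pad_waveform(np.array([1.0, 2.0, 3.0]), 8))  # [1. 2. 3. 1. 2. 3. 0. 0.]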
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(lowerCamelCase )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> Optional[int]:
super().__init__(*a_ , **a_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _a ( self , **a_ ) -> List[str]:
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*a_ , **a_ )
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = load_image(a_ )
_UpperCAmelCase = torch.IntTensor([[image.height, image.width]] )
_UpperCAmelCase = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
_UpperCAmelCase = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
_UpperCAmelCase = target_size
return inputs
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = model_inputs.pop("target_size" )
_UpperCAmelCase = self.model(**a_ )
_UpperCAmelCase = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
_UpperCAmelCase = model_inputs["bbox"]
return model_outputs
def _a ( self , a_ , a_=0.9 ) -> int:
_UpperCAmelCase = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
def unnormalize(a_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
            scores , classes = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_UpperCAmelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_UpperCAmelCase = [unnormalize(a_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [dict(zip(a_ , a_ ) ) for vals in zip(scores.tolist() , a_ , a_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_UpperCAmelCase = self.image_processor.post_process_object_detection(a_ , a_ , a_ )
_UpperCAmelCase = raw_annotations[0]
_UpperCAmelCase = raw_annotation["scores"]
_UpperCAmelCase = raw_annotation["labels"]
_UpperCAmelCase = raw_annotation["boxes"]
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = [self.model.config.idalabel[label.item()] for label in labels]
_UpperCAmelCase = [self._get_bounding_box(a_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [
dict(zip(a_ , a_ ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def _a ( self , a_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
        xmin , ymin , xmax , ymax = box.int().tolist()
_UpperCAmelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 657 | 1 |
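In normal use, the object-detection pipeline above is constructed through the standard `pipeline` factory. A minimal usage sketch follows; the checkpoint name and image path are illustrative:

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")  # assumed checkpoint
predictions = detector("path/to/image.jpg", threshold=0.9)
# each entry: {"score": float, "label": str,
#              "box": {"xmin": int, "ymin": int, "xmax": int, "ymax": int}}
for pred in predictions:
    print(pred["label"], round(pred["score"], 3), pred["box"])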
"""simple docstring"""
from manim import *
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[Any]:
_UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_UpperCAmelCase = Rectangle(height=0.25 , width=0.25 )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*a_ ).arrange(a_ , buff=0 )
_UpperCAmelCase = VGroup(*a_ ).arrange(a_ , buff=0 )
_UpperCAmelCase = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
_UpperCAmelCase = Text("CPU" , font_size=24 )
_UpperCAmelCase = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a_ )
_UpperCAmelCase = [mem.copy() for i in range(4 )]
_UpperCAmelCase = VGroup(*a_ ).arrange(a_ , buff=0 )
_UpperCAmelCase = Text("GPU" , font_size=24 )
_UpperCAmelCase = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
gpu.move_to([-1, -1, 0] )
self.add(a_ )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*a_ ).arrange(a_ , buff=0 )
_UpperCAmelCase = Text("Model" , font_size=24 )
_UpperCAmelCase = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
model.move_to([3, -1.0, 0] )
self.add(a_ )
_UpperCAmelCase = []
_UpperCAmelCase = []
for i, rect in enumerate(a_ ):
_UpperCAmelCase = fill.copy().set_fill(a_ , opacity=0.8 )
target.move_to(a_ )
model_arr.append(a_ )
_UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(a_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(a_ )
self.add(*a_ , *a_ )
_UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*a_ ).arrange(a_ , buff=0 )
_UpperCAmelCase = VGroup(*a_ ).arrange(a_ , buff=0 )
_UpperCAmelCase = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
_UpperCAmelCase = Text("Disk" , font_size=24 )
_UpperCAmelCase = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
disk.move_to([-4, -1.25, 0] )
self.add(a_ , a_ )
_UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a_ , a_ )
_UpperCAmelCase = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(a_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a_ )
_UpperCAmelCase = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a_ ) )
_UpperCAmelCase = Square(0.3 )
input.set_fill(a_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , a_ , buff=0.5 )
self.play(Write(a_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=a_ , buff=0.02 )
self.play(MoveToTarget(a_ ) )
self.play(FadeOut(a_ ) )
_UpperCAmelCase = Arrow(start=a_ , end=a_ , color=a_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , a_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_UpperCAmelCase = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a_ , run_time=3 ) )
_UpperCAmelCase = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
self.play(
Write(a_ ) , Circumscribe(model_arr[0] , color=a_ , **a_ ) , Circumscribe(model_cpu_arr[0] , color=a_ , **a_ ) , Circumscribe(gpu_rect[0] , color=a_ , **a_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_UpperCAmelCase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , a_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_UpperCAmelCase = AnimationGroup(
FadeOut(a_ , run_time=0.5 ) , MoveToTarget(a_ , run_time=0.5 ) , FadeIn(a_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(a_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_UpperCAmelCase = 0.7
self.play(
Circumscribe(model_arr[i] , **a_ ) , Circumscribe(cpu_left_col_base[i] , **a_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=a_ , **a_ ) , Circumscribe(gpu_rect[0] , color=a_ , **a_ ) , Circumscribe(model_arr[i + 1] , color=a_ , **a_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=a_ , **a_ ) , Circumscribe(cpu_left_col_base[-1] , color=a_ , **a_ ) , Circumscribe(gpu_rect[0] , color=a_ , **a_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_UpperCAmelCase = a_c
_UpperCAmelCase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(a_ ) , FadeOut(a_ , run_time=0.5 ) , )
_UpperCAmelCase = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a_ , run_time=3 ) , MoveToTarget(a_ ) )
self.wait()
| 657 |
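The scene above is built from a handful of Manim primitives (Rectangle, VGroup.arrange, Text, Group). Below is a self-contained sketch of the same layout idiom, assuming the community edition of Manim; the scene name is hypothetical, and the standard entry point is construct:

from manim import DOWN, RIGHT, Group, Rectangle, Scene, Text, VGroup

class MemoryRow(Scene):  # hypothetical scene name
    def construct(self):
        cell = Rectangle(height=0.5, width=0.5)
        # six copies arranged edge-to-edge, as in the CPU/GPU blocks above
        cells = VGroup(*[cell.copy() for _ in range(6)]).arrange(RIGHT, buff=0)
        label = Text("CPU", font_size=24)
        self.add(Group(cells, label).arrange(DOWN, buff=0.5))

Render with e.g. `manim -pql scene.py MemoryRow`.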
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def merge(UpperCamelCase__ , UpperCamelCase__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(UpperCamelCase__ ) <= 1:
return collection
_UpperCAmelCase = len(UpperCamelCase__ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 | 1 |
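For readability, here is the same top-down merge sort with descriptive names (pure illustration; the sample above remains the canonical version):

def merge_sort(collection: list) -> list:
    """Return a new list with the elements of `collection` in ascending order."""
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    left, right = merge_sort(collection[:mid]), merge_sort(collection[mid:])
    merged = []
    while left and right:
        # `<=` keeps the sort stable by preferring the left half on ties
        merged.append(left.pop(0) if left[0] <= right[0] else right.pop(0))
    return merged + left + right

print(merge_sort([5, 3, 1, 4, 2]))  # [1, 2, 3, 4, 5]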
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self ) -> List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def _a ( self ) -> int:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def _a ( self ) -> str:
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
_UpperCAmelCase = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def _a ( self ) -> Dict:
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCAmelCase = DDPMScheduler()
_UpperCAmelCase = AudioDiffusionPipeline(vqvae=a_ , unet=self.dummy_unet , mel=a_ , scheduler=a_ )
_UpperCAmelCase = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase = torch.Generator(device=a_ ).manual_seed(42 )
_UpperCAmelCase = pipe(generator=a_ , steps=4 )
_UpperCAmelCase = output.audios[0]
_UpperCAmelCase = output.images[0]
_UpperCAmelCase = torch.Generator(device=a_ ).manual_seed(42 )
_UpperCAmelCase = pipe(generator=a_ , steps=4 , return_dict=a_ )
_UpperCAmelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCAmelCase = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
_UpperCAmelCase = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
_UpperCAmelCase = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCAmelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = self.dummy_vqvae_and_unet
_UpperCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=a_ , scheduler=a_ )
_UpperCAmelCase = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
np.random.seed(0 )
_UpperCAmelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCAmelCase = torch.Generator(device=a_ ).manual_seed(42 )
_UpperCAmelCase = pipe(raw_audio=a_ , generator=a_ , start_step=5 , steps=10 )
_UpperCAmelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCAmelCase = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
_UpperCAmelCase = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCAmelCase = self.dummy_unet_condition
_UpperCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=a_ , mel=a_ , scheduler=a_ )
_UpperCAmelCase = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
np.random.seed(0 )
_UpperCAmelCase = torch.rand((1, 1, 10) )
_UpperCAmelCase = pipe(generator=a_ , encoding=a_ )
_UpperCAmelCase = output.images[0]
_UpperCAmelCase = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
_UpperCAmelCase = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = torch_device
_UpperCAmelCase = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
_UpperCAmelCase = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase = torch.Generator(device=a_ ).manual_seed(42 )
_UpperCAmelCase = pipe(generator=a_ )
_UpperCAmelCase = output.audios[0]
_UpperCAmelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCAmelCase = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
_UpperCAmelCase = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 657 |
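Outside the test harness, the slow test above reduces to a short inference script; the checkpoint name and the seeded-generator pattern are taken directly from the test:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
generator = torch.Generator(device="cpu").manual_seed(42)
output = pipe(generator=generator)
audio, image = output.audios[0], output.images[0]  # raw waveform and its mel-spectrogram image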
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 | 1 |
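The integration test above corresponds to this plain inference path; the checkpoint, prompt token ids, and greedy decoding come from the test itself, while the tokenizer round-trip is added for illustration:

import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = torch.tensor([[481, 4735, 544]])  # token ids for "the president is"
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
print(tokenizer.decode(output_ids[0]))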